From a978dcef7f98b7ed8639dda0d8c1e3f2c995f070 Mon Sep 17 00:00:00 2001
From: Vasilios Syrakis
Date: Fri, 11 Nov 2022 10:06:24 +1100
Subject: [PATCH] Version bump:

* removed precompiled protobuf files
* updated download script to 1.24.0
* latest PyPI package will contain 1.24.0 protobufs

Signed-off-by: Vasilios Syrakis
---
 CHANGELOG.md | 12 +-
 poetry.lock | 652 ++---
 pyproject.toml | 4 +-
 src/envoy_data_plane/__init__.py | 0
 src/envoy_data_plane/envoy/__init__.py | 0
 src/envoy_data_plane/envoy/admin/__init__.py | 0
 .../envoy/admin/v2alpha/__init__.py | 750 ------
 .../envoy/admin/v3/__init__.py | 943 -------
 .../envoy/annotations/__init__.py | 14 -
 src/envoy_data_plane/envoy/api/__init__.py | 0
 src/envoy_data_plane/envoy/api/v2/__init__.py | 2197 -----------------
 .../envoy/api/v2/auth/__init__.py | 410 ---
 .../envoy/api/v2/cluster/__init__.py | 253 --
 .../envoy/api/v2/core/__init__.py | 1545 ------------
 .../envoy/api/v2/endpoint/__init__.py | 261 --
 .../envoy/api/v2/listener/__init__.py | 283 ---
 .../envoy/api/v2/ratelimit/__init__.py | 47 -
 .../envoy/api/v2/route/__init__.py | 1532 ------------
 src/envoy_data_plane/envoy/config/__init__.py | 0
 .../envoy/config/accesslog/__init__.py | 0
 .../envoy/config/accesslog/v2/__init__.py | 109 -
 .../envoy/config/accesslog/v3/__init__.py | 273 --
 .../envoy/config/bootstrap/__init__.py | 0
 .../envoy/config/bootstrap/v2/__init__.py | 333 ---
 .../envoy/config/bootstrap/v3/__init__.py | 555 -----
 .../envoy/config/cluster/__init__.py | 0
 .../config/cluster/aggregate/__init__.py | 0
 .../cluster/aggregate/v2alpha/__init__.py | 21 -
 .../cluster/dynamic_forward_proxy/__init__.py | 0
 .../dynamic_forward_proxy/v2alpha/__init__.py | 29 -
 .../envoy/config/cluster/redis/__init__.py | 39 -
 .../envoy/config/cluster/v3/__init__.py | 1299 ----------
 .../envoy/config/common/__init__.py | 0
 .../common/dynamic_forward_proxy/__init__.py | 0
 .../dynamic_forward_proxy/v2alpha/__init__.py | 66 -
 .../envoy/config/common/key_value/__init__.py | 0
 .../config/common/key_value/v3/__init__.py | 18 -
 .../envoy/config/common/matcher/__init__.py | 0
 .../config/common/matcher/v3/__init__.py | 242 --
 .../config/common/mutation_rules/__init__.py | 0
 .../common/mutation_rules/v3/__init__.py | 77 -
 .../envoy/config/common/tap/__init__.py | 0
 .../config/common/tap/v2alpha/__init__.py | 36 -
 .../envoy/config/core/__init__.py | 0
 .../envoy/config/core/v3/__init__.py | 2131 ----------------
 .../envoy/config/endpoint/__init__.py | 0
 .../envoy/config/endpoint/v3/__init__.py | 361 ---
 .../envoy/config/filter/__init__.py | 0
 .../envoy/config/filter/accesslog/__init__.py | 0
 .../config/filter/accesslog/v2/__init__.py | 263 --
 .../envoy/config/filter/dubbo/__init__.py | 0
 .../config/filter/dubbo/router/__init__.py | 0
 .../filter/dubbo/router/v2alpha1/__init__.py | 12 -
 .../envoy/config/filter/fault/__init__.py | 0
 .../envoy/config/filter/fault/v2/__init__.py | 94 -
 .../envoy/config/filter/http/__init__.py | 0
 .../http/adaptive_concurrency/__init__.py | 0
 .../adaptive_concurrency/v2alpha/__init__.py | 85 -
 .../config/filter/http/aws_lambda/__init__.py | 0
 .../http/aws_lambda/v2alpha/__init__.py | 38 -
 .../http/aws_request_signing/__init__.py | 0
 .../aws_request_signing/v2alpha/__init__.py | 29 -
 .../config/filter/http/buffer/__init__.py | 0
 .../config/filter/http/buffer/v2/__init__.py | 25 -
 .../config/filter/http/cache/__init__.py | 0
 .../filter/http/cache/v2alpha/__init__.py | 67 -
 .../config/filter/http/compressor/__init__.py | 0
 .../filter/http/compressor/v2/__init__.py | 45 -
.../envoy/config/filter/http/cors/__init__.py | 0 .../config/filter/http/cors/v2/__init__.py | 14 - .../envoy/config/filter/http/csrf/__init__.py | 0 .../config/filter/http/csrf/v2/__init__.py | 43 - .../http/dynamic_forward_proxy/__init__.py | 0 .../dynamic_forward_proxy/v2alpha/__init__.py | 57 - .../config/filter/http/dynamo/__init__.py | 0 .../config/filter/http/dynamo/v2/__init__.py | 14 - .../config/filter/http/ext_authz/__init__.py | 0 .../filter/http/ext_authz/v2/__init__.py | 221 -- .../config/filter/http/fault/__init__.py | 0 .../config/filter/http/fault/v2/__init__.py | 114 - .../filter/http/grpc_http1_bridge/__init__.py | 0 .../http/grpc_http1_bridge/v2/__init__.py | 14 - .../grpc_http1_reverse_bridge/__init__.py | 0 .../v2alpha1/__init__.py | 35 - .../config/filter/http/grpc_stats/__init__.py | 0 .../http/grpc_stats/v2alpha/__init__.py | 53 - .../config/filter/http/grpc_web/__init__.py | 0 .../filter/http/grpc_web/v2/__init__.py | 14 - .../envoy/config/filter/http/gzip/__init__.py | 0 .../config/filter/http/gzip/v2/__init__.py | 108 - .../http/header_to_metadata/__init__.py | 0 .../http/header_to_metadata/v2/__init__.py | 67 - .../filter/http/health_check/__init__.py | 0 .../filter/http/health_check/v2/__init__.py | 39 - .../config/filter/http/ip_tagging/__init__.py | 0 .../filter/http/ip_tagging/v2/__init__.py | 38 - .../config/filter/http/jwt_authn/__init__.py | 0 .../filter/http/jwt_authn/v2alpha/__init__.py | 306 --- .../envoy/config/filter/http/lua/__init__.py | 0 .../config/filter/http/lua/v2/__init__.py | 17 - .../config/filter/http/on_demand/__init__.py | 0 .../filter/http/on_demand/v2/__init__.py | 12 - .../filter/http/original_src/__init__.py | 0 .../http/original_src/v2alpha1/__init__.py | 23 - .../config/filter/http/rate_limit/__init__.py | 0 .../filter/http/rate_limit/v2/__init__.py | 48 - .../envoy/config/filter/http/rbac/__init__.py | 0 .../config/filter/http/rbac/v2/__init__.py | 30 - .../config/filter/http/router/__init__.py | 0 .../config/filter/http/router/v2/__init__.py | 52 - .../config/filter/http/squash/__init__.py | 0 .../config/filter/http/squash/v2/__init__.py | 41 - .../envoy/config/filter/http/tap/__init__.py | 0 .../filter/http/tap/v2alpha/__init__.py | 20 - .../config/filter/http/transcoder/__init__.py | 0 .../filter/http/transcoder/v2/__init__.py | 98 - .../envoy/config/filter/listener/__init__.py | 0 .../listener/http_inspector/__init__.py | 0 .../listener/http_inspector/v2/__init__.py | 12 - .../filter/listener/original_dst/__init__.py | 0 .../listener/original_dst/v2/__init__.py | 12 - .../filter/listener/original_src/__init__.py | 0 .../original_src/v2alpha1/__init__.py | 26 - .../listener/proxy_protocol/__init__.py | 0 .../listener/proxy_protocol/v2/__init__.py | 12 - .../filter/listener/tls_inspector/__init__.py | 0 .../listener/tls_inspector/v2/__init__.py | 12 - .../envoy/config/filter/network/__init__.py | 0 .../network/client_ssl_auth/__init__.py | 0 .../network/client_ssl_auth/v2/__init__.py | 31 - .../network/direct_response/__init__.py | 0 .../network/direct_response/v2/__init__.py | 16 - .../filter/network/dubbo_proxy/__init__.py | 0 .../network/dubbo_proxy/v2alpha1/__init__.py | 143 -- .../config/filter/network/echo/__init__.py | 0 .../config/filter/network/echo/v2/__init__.py | 12 - .../filter/network/ext_authz/__init__.py | 0 .../filter/network/ext_authz/v2/__init__.py | 36 - .../http_connection_manager/__init__.py | 0 .../http_connection_manager/v2/__init__.py | 633 ----- .../filter/network/kafka_broker/__init__.py | 0 
.../network/kafka_broker/v2alpha1/__init__.py | 14 - .../network/local_rate_limit/__init__.py | 0 .../local_rate_limit/v2alpha/__init__.py | 33 - .../filter/network/mongo_proxy/__init__.py | 0 .../filter/network/mongo_proxy/v2/__init__.py | 30 - .../filter/network/mysql_proxy/__init__.py | 0 .../network/mysql_proxy/v1alpha1/__init__.py | 17 - .../filter/network/rate_limit/__init__.py | 0 .../filter/network/rate_limit/v2/__init__.py | 42 - .../config/filter/network/rbac/__init__.py | 0 .../config/filter/network/rbac/v2/__init__.py | 40 - .../filter/network/redis_proxy/__init__.py | 0 .../filter/network/redis_proxy/v2/__init__.py | 220 -- .../filter/network/sni_cluster/__init__.py | 0 .../filter/network/sni_cluster/v2/__init__.py | 12 - .../filter/network/tcp_proxy/__init__.py | 0 .../filter/network/tcp_proxy/v2/__init__.py | 182 -- .../filter/network/thrift_proxy/__init__.py | 0 .../network/thrift_proxy/v2alpha1/__init__.py | 233 -- .../network/zookeeper_proxy/__init__.py | 0 .../zookeeper_proxy/v1alpha1/__init__.py | 27 - .../envoy/config/filter/thrift/__init__.py | 0 .../filter/thrift/rate_limit/__init__.py | 0 .../thrift/rate_limit/v2alpha1/__init__.py | 40 - .../config/filter/thrift/router/__init__.py | 0 .../filter/thrift/router/v2alpha1/__init__.py | 12 - .../envoy/config/filter/udp/__init__.py | 0 .../config/filter/udp/udp_proxy/__init__.py | 0 .../filter/udp/udp_proxy/v2alpha/__init__.py | 21 - .../envoy/config/grpc_credential/__init__.py | 0 .../grpc_credential/v2alpha/__init__.py | 36 - .../config/grpc_credential/v3/__init__.py | 36 - .../envoy/config/health_checker/__init__.py | 0 .../config/health_checker/redis/__init__.py | 0 .../health_checker/redis/v2/__init__.py | 17 - .../envoy/config/listener/__init__.py | 0 .../envoy/config/listener/v2/__init__.py | 30 - .../envoy/config/listener/v3/__init__.py | 686 ----- .../envoy/config/metrics/__init__.py | 0 .../envoy/config/metrics/v2/__init__.py | 237 -- .../envoy/config/metrics/v3/__init__.py | 276 --- .../envoy/config/overload/__init__.py | 0 .../envoy/config/overload/v2alpha/__init__.py | 71 - .../envoy/config/overload/v3/__init__.py | 132 - .../envoy/config/ratelimit/__init__.py | 0 .../envoy/config/ratelimit/v2/__init__.py | 22 - .../envoy/config/ratelimit/v3/__init__.py | 25 - .../envoy/config/rbac/__init__.py | 0 .../envoy/config/rbac/v2/__init__.py | 227 -- .../envoy/config/rbac/v3/__init__.py | 256 -- .../envoy/config/resource_monitor/__init__.py | 0 .../resource_monitor/fixed_heap/__init__.py | 0 .../fixed_heap/v2alpha/__init__.py | 18 - .../injected_resource/__init__.py | 0 .../injected_resource/v2alpha/__init__.py | 20 - .../envoy/config/retry/__init__.py | 0 .../retry/omit_canary_hosts/__init__.py | 0 .../retry/omit_canary_hosts/v2/__init__.py | 12 - .../retry/omit_host_metadata/__init__.py | 0 .../retry/omit_host_metadata/v2/__init__.py | 25 - .../config/retry/previous_hosts/__init__.py | 0 .../retry/previous_hosts/v2/__init__.py | 12 - .../retry/previous_priorities/__init__.py | 46 - .../envoy/config/route/__init__.py | 0 .../envoy/config/route/v3/__init__.py | 2077 ---------------- .../envoy/config/tap/__init__.py | 0 .../envoy/config/tap/v3/__init__.py | 241 -- .../envoy/config/trace/__init__.py | 0 .../envoy/config/trace/v2/__init__.py | 223 -- .../envoy/config/trace/v2alpha/__init__.py | 24 - .../envoy/config/trace/v3/__init__.py | 313 --- .../envoy/config/transport_socket/__init__.py | 0 .../config/transport_socket/alts/__init__.py | 0 .../transport_socket/alts/v2alpha/__init__.py | 25 - 
.../transport_socket/raw_buffer/__init__.py | 0 .../raw_buffer/v2/__init__.py | 14 - .../config/transport_socket/tap/__init__.py | 0 .../transport_socket/tap/v2alpha/__init__.py | 27 - src/envoy_data_plane/envoy/data/__init__.py | 0 .../envoy/data/accesslog/__init__.py | 0 .../envoy/data/accesslog/v2/__init__.py | 332 --- .../envoy/data/accesslog/v3/__init__.py | 362 --- .../envoy/data/cluster/__init__.py | 0 .../envoy/data/cluster/v2alpha/__init__.py | 120 - .../envoy/data/cluster/v3/__init__.py | 124 - .../envoy/data/core/__init__.py | 0 .../envoy/data/core/v2alpha/__init__.py | 88 - .../envoy/data/core/v3/__init__.py | 89 - .../envoy/data/dns/__init__.py | 0 .../envoy/data/dns/v2alpha/__init__.py | 72 - .../envoy/data/dns/v3/__init__.py | 142 -- .../envoy/data/tap/__init__.py | 0 .../envoy/data/tap/v2alpha/__init__.py | 205 -- .../envoy/data/tap/v3/__init__.py | 205 -- .../envoy/extensions/__init__.py | 0 .../extensions/access_loggers/__init__.py | 0 .../access_loggers/file/__init__.py | 0 .../access_loggers/file/v3/__init__.py | 64 - .../access_loggers/filters/__init__.py | 0 .../access_loggers/filters/cel/__init__.py | 0 .../access_loggers/filters/cel/v3/__init__.py | 24 - .../access_loggers/grpc/__init__.py | 0 .../access_loggers/grpc/v3/__init__.py | 95 - .../access_loggers/open_telemetry/__init__.py | 0 .../open_telemetry/v3/__init__.py | 48 - .../access_loggers/stream/__init__.py | 0 .../access_loggers/stream/v3/__init__.py | 40 - .../access_loggers/wasm/__init__.py | 0 .../access_loggers/wasm/v3/__init__.py | 21 - .../envoy/extensions/cache/__init__.py | 0 .../cache/simple_http_cache/__init__.py | 0 .../cache/simple_http_cache/v3/__init__.py | 14 - .../envoy/extensions/clusters/__init__.py | 0 .../extensions/clusters/aggregate/__init__.py | 0 .../clusters/aggregate/v3/__init__.py | 21 - .../dynamic_forward_proxy/__init__.py | 0 .../dynamic_forward_proxy/v3/__init__.py | 49 - .../extensions/clusters/redis/__init__.py | 0 .../extensions/clusters/redis/v3/__init__.py | 39 - .../envoy/extensions/common/__init__.py | 0 .../common/dynamic_forward_proxy/__init__.py | 0 .../dynamic_forward_proxy/v3/__init__.py | 142 -- .../extensions/common/matching/__init__.py | 0 .../extensions/common/matching/v3/__init__.py | 38 - .../extensions/common/ratelimit/__init__.py | 0 .../common/ratelimit/v3/__init__.py | 77 - .../envoy/extensions/common/tap/__init__.py | 0 .../extensions/common/tap/v3/__init__.py | 36 - .../envoy/extensions/compression/__init__.py | 0 .../extensions/compression/brotli/__init__.py | 0 .../compression/brotli/compressor/__init__.py | 0 .../brotli/compressor/v3/__init__.py | 51 - .../brotli/decompressor/__init__.py | 0 .../brotli/decompressor/v3/__init__.py | 19 - .../extensions/compression/gzip/__init__.py | 0 .../compression/gzip/compressor/__init__.py | 0 .../gzip/compressor/v3/__init__.py | 77 - .../compression/gzip/decompressor/__init__.py | 0 .../gzip/decompressor/v3/__init__.py | 27 - .../envoy/extensions/filters/__init__.py | 0 .../extensions/filters/common/__init__.py | 0 .../filters/common/dependency/__init__.py | 0 .../filters/common/dependency/v3/__init__.py | 63 - .../filters/common/fault/__init__.py | 0 .../filters/common/fault/v3/__init__.py | 83 - .../filters/common/matcher/__init__.py | 0 .../filters/common/matcher/action/__init__.py | 0 .../common/matcher/action/v3/__init__.py | 24 - .../envoy/extensions/filters/http/__init__.py | 0 .../http/adaptive_concurrency/__init__.py | 0 .../http/adaptive_concurrency/v3/__init__.py | 85 - 
.../http/admission_control/__init__.py | 0 .../http/admission_control/v3/__init__.py | 97 - .../alternate_protocols_cache/__init__.py | 0 .../alternate_protocols_cache/v3/__init__.py | 27 - .../filters/http/aws_lambda/__init__.py | 0 .../filters/http/aws_lambda/v3/__init__.py | 38 - .../http/aws_request_signing/__init__.py | 0 .../http/aws_request_signing/v3/__init__.py | 52 - .../filters/http/bandwidth_limit/__init__.py | 0 .../http/bandwidth_limit/v3/__init__.py | 58 - .../filters/http/buffer/__init__.py | 0 .../filters/http/buffer/v3/__init__.py | 25 - .../extensions/filters/http/cache/__init__.py | 0 .../filters/http/cache/v3/__init__.py | 70 - .../filters/http/cdn_loop/__init__.py | 0 .../filters/http/cdn_loop/v3/__init__.py | 25 - .../filters/http/composite/__init__.py | 0 .../filters/http/composite/v3/__init__.py | 42 - .../filters/http/compressor/__init__.py | 0 .../filters/http/compressor/v3/__init__.py | 136 - .../extensions/filters/http/cors/__init__.py | 0 .../filters/http/cors/v3/__init__.py | 14 - .../extensions/filters/http/csrf/__init__.py | 0 .../filters/http/csrf/v3/__init__.py | 44 - .../filters/http/decompressor/__init__.py | 0 .../filters/http/decompressor/v3/__init__.py | 68 - .../http/dynamic_forward_proxy/__init__.py | 0 .../http/dynamic_forward_proxy/v3/__init__.py | 65 - .../filters/http/dynamo/__init__.py | 0 .../filters/http/dynamo/v3/__init__.py | 14 - .../filters/http/ext_authz/__init__.py | 0 .../filters/http/ext_authz/v3/__init__.py | 283 --- .../filters/http/ext_proc/__init__.py | 0 .../filters/http/ext_proc/v3/__init__.py | 129 - .../extensions/filters/http/fault/__init__.py | 0 .../filters/http/fault/v3/__init__.py | 130 - .../http/grpc_http1_bridge/__init__.py | 0 .../http/grpc_http1_bridge/v3/__init__.py | 20 - .../grpc_http1_reverse_bridge/__init__.py | 0 .../grpc_http1_reverse_bridge/v3/__init__.py | 52 - .../http/grpc_json_transcoder/__init__.py | 0 .../http/grpc_json_transcoder/v3/__init__.py | 178 -- .../filters/http/grpc_stats/__init__.py | 0 .../filters/http/grpc_stats/v3/__init__.py | 60 - .../filters/http/grpc_web/__init__.py | 0 .../filters/http/grpc_web/v3/__init__.py | 14 - .../extensions/filters/http/gzip/__init__.py | 0 .../filters/http/gzip/v3/__init__.py | 74 - .../http/header_to_metadata/__init__.py | 0 .../http/header_to_metadata/v3/__init__.py | 86 - .../filters/http/health_check/__init__.py | 0 .../filters/http/health_check/v3/__init__.py | 39 - .../filters/http/ip_tagging/__init__.py | 0 .../filters/http/ip_tagging/v3/__init__.py | 38 - .../filters/http/jwt_authn/__init__.py | 0 .../filters/http/jwt_authn/v3/__init__.py | 440 ---- .../filters/http/kill_request/__init__.py | 0 .../filters/http/kill_request/v3/__init__.py | 29 - .../filters/http/local_ratelimit/__init__.py | 0 .../http/local_ratelimit/v3/__init__.py | 87 - .../extensions/filters/http/lua/__init__.py | 0 .../filters/http/lua/v3/__init__.py | 46 - .../filters/http/oauth2/__init__.py | 0 .../filters/http/oauth2/v3/__init__.py | 97 - .../filters/http/on_demand/__init__.py | 0 .../filters/http/on_demand/v3/__init__.py | 12 - .../filters/http/original_src/__init__.py | 0 .../filters/http/original_src/v3/__init__.py | 23 - .../filters/http/ratelimit/__init__.py | 0 .../filters/http/ratelimit/v3/__init__.py | 328 --- .../extensions/filters/http/rbac/__init__.py | 0 .../filters/http/rbac/v3/__init__.py | 34 - .../filters/http/router/__init__.py | 0 .../filters/http/router/v3/__init__.py | 62 - .../filters/http/set_metadata/__init__.py | 0 
.../filters/http/set_metadata/v3/__init__.py | 20 - .../filters/http/stateful_session/__init__.py | 0 .../http/stateful_session/v3/__init__.py | 31 - .../extensions/filters/http/tap/__init__.py | 0 .../filters/http/tap/v3/__init__.py | 20 - .../extensions/filters/http/wasm/__init__.py | 0 .../filters/http/wasm/v3/__init__.py | 16 - .../extensions/filters/listener/__init__.py | 0 .../listener/http_inspector/__init__.py | 0 .../listener/http_inspector/v3/__init__.py | 12 - .../filters/listener/original_dst/__init__.py | 0 .../listener/original_dst/v3/__init__.py | 12 - .../filters/listener/original_src/__init__.py | 0 .../listener/original_src/v3/__init__.py | 26 - .../listener/proxy_protocol/__init__.py | 0 .../listener/proxy_protocol/v3/__init__.py | 37 - .../listener/tls_inspector/__init__.py | 0 .../listener/tls_inspector/v3/__init__.py | 17 - .../extensions/filters/network/__init__.py | 0 .../network/client_ssl_auth/__init__.py | 0 .../network/client_ssl_auth/v3/__init__.py | 33 - .../network/connection_limit/__init__.py | 0 .../network/connection_limit/v3/__init__.py | 36 - .../network/direct_response/__init__.py | 0 .../network/direct_response/v3/__init__.py | 16 - .../filters/network/dubbo_proxy/__init__.py | 0 .../network/dubbo_proxy/router/__init__.py | 0 .../network/dubbo_proxy/router/v3/__init__.py | 12 - .../network/dubbo_proxy/v3/__init__.py | 150 -- .../filters/network/echo/__init__.py | 0 .../filters/network/echo/v3/__init__.py | 12 - .../filters/network/ext_authz/__init__.py | 0 .../filters/network/ext_authz/v3/__init__.py | 53 - .../http_connection_manager/__init__.py | 0 .../http_connection_manager/v3/__init__.py | 873 ------- .../network/local_ratelimit/__init__.py | 0 .../network/local_ratelimit/v3/__init__.py | 33 - .../network/meta_protocol_proxy/__init__.py | 0 .../meta_protocol_proxy/matcher/__init__.py | 0 .../matcher/action/__init__.py | 0 .../matcher/action/v3/__init__.py | 24 - .../matcher/v3/__init__.py | 39 - .../meta_protocol_proxy/v3/__init__.py | 98 - .../filters/network/mongo_proxy/__init__.py | 0 .../network/mongo_proxy/v3/__init__.py | 38 - .../filters/network/ratelimit/__init__.py | 0 .../filters/network/ratelimit/v3/__init__.py | 42 - .../filters/network/rbac/__init__.py | 0 .../filters/network/rbac/v3/__init__.py | 44 - .../filters/network/redis_proxy/__init__.py | 0 .../network/redis_proxy/v3/__init__.py | 248 -- .../filters/network/sni_cluster/__init__.py | 0 .../network/sni_cluster/v3/__init__.py | 12 - .../sni_dynamic_forward_proxy/__init__.py | 0 .../sni_dynamic_forward_proxy/v3/__init__.py | 33 - .../filters/network/tcp_proxy/__init__.py | 0 .../filters/network/tcp_proxy/v3/__init__.py | 130 - .../filters/network/thrift_proxy/__init__.py | 0 .../network/thrift_proxy/filters/__init__.py | 0 .../filters/header_to_metadata/__init__.py | 0 .../filters/header_to_metadata/v3/__init__.py | 76 - .../filters/ratelimit/__init__.py | 0 .../filters/ratelimit/v3/__init__.py | 40 - .../network/thrift_proxy/router/__init__.py | 0 .../thrift_proxy/router/v3/__init__.py | 12 - .../network/thrift_proxy/v3/__init__.py | 264 -- .../filters/network/wasm/__init__.py | 0 .../filters/network/wasm/v3/__init__.py | 16 - .../network/zookeeper_proxy/__init__.py | 0 .../network/zookeeper_proxy/v3/__init__.py | 27 - .../envoy/extensions/filters/udp/__init__.py | 0 .../filters/udp/dns_filter/__init__.py | 0 .../filters/udp/dns_filter/v3/__init__.py | 116 - .../filters/udp/udp_proxy/__init__.py | 0 .../filters/udp/udp_proxy/v3/__init__.py | 80 - 
.../envoy/extensions/formatter/__init__.py | 0 .../extensions/formatter/metadata/__init__.py | 0 .../formatter/metadata/v3/__init__.py | 12 - .../formatter/req_without_query/__init__.py | 0 .../req_without_query/v3/__init__.py | 14 - .../extensions/health_checkers/__init__.py | 0 .../health_checkers/redis/__init__.py | 0 .../health_checkers/redis/v3/__init__.py | 17 - .../envoy/extensions/http/__init__.py | 0 .../http/header_formatters/__init__.py | 0 .../preserve_case/__init__.py | 0 .../preserve_case/v3/__init__.py | 20 - .../http/original_ip_detection/__init__.py | 0 .../custom_header/__init__.py | 0 .../custom_header/v3/__init__.py | 40 - .../original_ip_detection/xff/__init__.py | 0 .../original_ip_detection/xff/v3/__init__.py | 23 - .../http/stateful_session/__init__.py | 0 .../http/stateful_session/cookie/__init__.py | 0 .../stateful_session/cookie/v3/__init__.py | 36 - .../extensions/internal_redirect/__init__.py | 0 .../allow_listed_routes/__init__.py | 0 .../allow_listed_routes/v3/__init__.py | 23 - .../previous_routes/__init__.py | 0 .../previous_routes/v3/__init__.py | 19 - .../safe_cross_scheme/__init__.py | 0 .../safe_cross_scheme/v3/__init__.py | 21 - .../envoy/extensions/key_value/__init__.py | 0 .../key_value/file_based/__init__.py | 0 .../key_value/file_based/v3/__init__.py | 22 - .../envoy/extensions/matching/__init__.py | 0 .../matching/common_inputs/__init__.py | 0 .../environment_variable/__init__.py | 0 .../environment_variable/v3/__init__.py | 15 - .../matching/input_matchers/__init__.py | 0 .../consistent_hashing/__init__.py | 0 .../consistent_hashing/v3/__init__.py | 34 - .../matching/input_matchers/ip/__init__.py | 0 .../matching/input_matchers/ip/v3/__init__.py | 34 - .../envoy/extensions/network/__init__.py | 0 .../network/dns_resolver/__init__.py | 0 .../network/dns_resolver/apple/__init__.py | 0 .../network/dns_resolver/apple/v3/__init__.py | 14 - .../network/dns_resolver/cares/__init__.py | 0 .../network/dns_resolver/cares/v3/__init__.py | 40 - .../network/socket_interface/__init__.py | 0 .../network/socket_interface/v3/__init__.py | 17 - .../envoy/extensions/quic/__init__.py | 0 .../extensions/quic/crypto_stream/__init__.py | 0 .../quic/crypto_stream/v3/__init__.py | 16 - .../extensions/quic/proof_source/__init__.py | 0 .../quic/proof_source/v3/__init__.py | 14 - .../rate_limit_descriptors/__init__.py | 0 .../rate_limit_descriptors/expr/__init__.py | 0 .../expr/v3/__init__.py | 33 - .../envoy/extensions/rbac/__init__.py | 0 .../extensions/rbac/matchers/__init__.py | 0 .../matchers/upstream_ip_port/__init__.py | 0 .../matchers/upstream_ip_port/v3/__init__.py | 33 - .../envoy/extensions/request_id/__init__.py | 0 .../extensions/request_id/uuid/__init__.py | 0 .../extensions/request_id/uuid/v3/__init__.py | 45 - .../extensions/resource_monitors/__init__.py | 0 .../resource_monitors/fixed_heap/__init__.py | 0 .../fixed_heap/v3/__init__.py | 18 - .../injected_resource/__init__.py | 0 .../injected_resource/v3/__init__.py | 20 - .../envoy/extensions/retry/__init__.py | 0 .../envoy/extensions/retry/host/__init__.py | 0 .../retry/host/omit_canary_hosts/__init__.py | 0 .../host/omit_canary_hosts/v3/__init__.py | 12 - .../retry/host/omit_host_metadata/__init__.py | 0 .../host/omit_host_metadata/v3/__init__.py | 25 - .../retry/host/previous_hosts/__init__.py | 0 .../retry/host/previous_hosts/v3/__init__.py | 12 - .../extensions/retry/priority/__init__.py | 0 .../priority/previous_priorities/__init__.py | 0 .../previous_priorities/v3/__init__.py | 46 - 
.../envoy/extensions/stat_sinks/__init__.py | 0 .../stat_sinks/graphite_statsd/__init__.py | 0 .../stat_sinks/graphite_statsd/v3/__init__.py | 31 - .../extensions/stat_sinks/wasm/__init__.py | 0 .../extensions/stat_sinks/wasm/v3/__init__.py | 16 - .../extensions/transport_sockets/__init__.py | 0 .../transport_sockets/alts/__init__.py | 0 .../transport_sockets/alts/v3/__init__.py | 27 - .../proxy_protocol/__init__.py | 0 .../proxy_protocol/v3/__init__.py | 22 - .../transport_sockets/quic/__init__.py | 0 .../transport_sockets/quic/v3/__init__.py | 32 - .../transport_sockets/raw_buffer/__init__.py | 0 .../raw_buffer/v3/__init__.py | 14 - .../transport_sockets/s2a/__init__.py | 0 .../transport_sockets/s2a/v3/__init__.py | 20 - .../transport_sockets/starttls/__init__.py | 0 .../transport_sockets/starttls/v3/__init__.py | 47 - .../transport_sockets/tap/__init__.py | 0 .../transport_sockets/tap/v3/__init__.py | 29 - .../transport_sockets/tcp_stats/__init__.py | 0 .../tcp_stats/v3/__init__.py | 32 - .../transport_sockets/tls/__init__.py | 0 .../transport_sockets/tls/v3/__init__.py | 753 ------ .../envoy/extensions/upstreams/__init__.py | 0 .../extensions/upstreams/http/__init__.py | 0 .../upstreams/http/generic/__init__.py | 0 .../upstreams/http/generic/v3/__init__.py | 18 - .../upstreams/http/http/__init__.py | 0 .../upstreams/http/http/v3/__init__.py | 17 - .../extensions/upstreams/http/tcp/__init__.py | 0 .../upstreams/http/tcp/v3/__init__.py | 17 - .../extensions/upstreams/http/v3/__init__.py | 146 -- .../extensions/upstreams/tcp/__init__.py | 0 .../upstreams/tcp/generic/__init__.py | 0 .../upstreams/tcp/generic/v3/__init__.py | 17 - .../envoy/extensions/wasm/__init__.py | 0 .../envoy/extensions/wasm/v3/__init__.py | 160 -- .../envoy/extensions/watchdog/__init__.py | 0 .../watchdog/profile_action/__init__.py | 0 .../watchdog/profile_action/v3/__init__.py | 22 - .../envoy/service/__init__.py | 0 .../envoy/service/accesslog/__init__.py | 0 .../envoy/service/accesslog/v2/__init__.py | 107 - .../envoy/service/accesslog/v3/__init__.py | 108 - .../envoy/service/auth/__init__.py | 0 .../envoy/service/auth/v2/__init__.py | 236 -- .../envoy/service/auth/v2alpha/__init__.py | 53 - .../envoy/service/auth/v3/__init__.py | 309 --- .../envoy/service/cluster/__init__.py | 0 .../envoy/service/cluster/v3/__init__.py | 163 -- .../envoy/service/discovery/__init__.py | 0 .../envoy/service/discovery/v2/__init__.py | 581 ----- .../envoy/service/discovery/v3/__init__.py | 335 --- .../envoy/service/endpoint/__init__.py | 0 .../envoy/service/endpoint/v3/__init__.py | 220 -- .../envoy/service/event_reporting/__init__.py | 0 .../event_reporting/v2alpha/__init__.py | 94 - .../service/event_reporting/v3/__init__.py | 94 - .../envoy/service/ext_proc/__init__.py | 0 .../envoy/service/ext_proc/v3/__init__.py | 325 --- .../envoy/service/extension/__init__.py | 0 .../envoy/service/extension/v3/__init__.py | 169 -- .../envoy/service/health/__init__.py | 0 .../envoy/service/health/v3/__init__.py | 230 -- .../envoy/service/listener/__init__.py | 0 .../envoy/service/listener/v3/__init__.py | 163 -- .../envoy/service/load_stats/__init__.py | 0 .../envoy/service/load_stats/v2/__init__.py | 102 - .../envoy/service/load_stats/v3/__init__.py | 98 - .../envoy/service/metrics/__init__.py | 0 .../envoy/service/metrics/v2/__init__.py | 74 - .../envoy/service/metrics/v3/__init__.py | 74 - .../envoy/service/ratelimit/__init__.py | 0 .../envoy/service/ratelimit/v2/__init__.py | 156 -- .../envoy/service/ratelimit/v3/__init__.py | 255 -- 
.../envoy/service/route/__init__.py | 0 .../envoy/service/route/v3/__init__.py | 358 --- .../envoy/service/runtime/__init__.py | 0 .../envoy/service/runtime/v3/__init__.py | 176 -- .../envoy/service/secret/__init__.py | 0 .../envoy/service/secret/v3/__init__.py | 162 -- .../envoy/service/status/__init__.py | 0 .../envoy/service/status/v2/__init__.py | 154 -- .../envoy/service/status/v3/__init__.py | 256 -- .../envoy/service/tap/__init__.py | 0 .../envoy/service/tap/v2alpha/__init__.py | 257 -- .../envoy/service/tap/v3/__init__.py | 85 - .../envoy/service/trace/__init__.py | 0 .../envoy/service/trace/v2/__init__.py | 72 - .../envoy/service/trace/v3/__init__.py | 72 - src/envoy_data_plane/envoy/type/__init__.py | 213 -- .../envoy/type/http/__init__.py | 0 .../envoy/type/http/v3/__init__.py | 77 - .../envoy/type/matcher/__init__.py | 280 --- .../envoy/type/matcher/v3/__init__.py | 331 --- .../envoy/type/metadata/__init__.py | 0 .../envoy/type/metadata/v2/__init__.py | 94 - .../envoy/type/metadata/v3/__init__.py | 94 - .../envoy/type/tracing/__init__.py | 0 .../envoy/type/tracing/v2/__init__.py | 75 - .../envoy/type/tracing/v3/__init__.py | 76 - .../envoy/type/v3/__init__.py | 245 -- .../envoy/watchdog/__init__.py | 0 .../envoy/watchdog/v3/__init__.py | 24 - src/envoy_data_plane/google/__init__.py | 0 src/envoy_data_plane/google/api/__init__.py | 224 -- .../google/api/expr/__init__.py | 0 .../google/api/expr/v1alpha1/__init__.py | 536 ---- src/envoy_data_plane/google/rpc/__init__.py | 35 - src/envoy_data_plane/io/__init__.py | 0 .../io/prometheus/__init__.py | 0 .../io/prometheus/client/__init__.py | 92 - src/envoy_data_plane/opencensus/__init__.py | 0 .../opencensus/proto/__init__.py | 0 .../opencensus/proto/resource/__init__.py | 0 .../opencensus/proto/resource/v1/__init__.py | 20 - .../opencensus/proto/trace/__init__.py | 0 .../opencensus/proto/trace/v1/__init__.py | 424 ---- .../opentelemetry/__init__.py | 0 .../opentelemetry/proto/__init__.py | 0 .../opentelemetry/proto/common/__init__.py | 0 .../opentelemetry/proto/common/v1/__init__.py | 89 - src/envoy_data_plane/udpa/__init__.py | 0 .../udpa/annotations/__init__.py | 71 - src/envoy_data_plane/validate/__init__.py | 711 ------ src/envoy_data_plane/xds/__init__.py | 0 .../xds/annotations/__init__.py | 0 .../xds/annotations/v3/__init__.py | 45 - src/envoy_data_plane/xds/core/__init__.py | 0 src/envoy_data_plane/xds/core/v3/__init__.py | 167 -- src/envoy_data_plane/xds/type/__init__.py | 0 .../xds/type/matcher/__init__.py | 0 .../xds/type/matcher/v3/__init__.py | 217 -- utils/download_protobufs.py | 2 +- 655 files changed, 360 insertions(+), 43699 deletions(-) delete mode 100644 src/envoy_data_plane/__init__.py delete mode 100644 src/envoy_data_plane/envoy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/admin/__init__.py delete mode 100644 src/envoy_data_plane/envoy/admin/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/admin/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/annotations/__init__.py delete mode 100644 src/envoy_data_plane/envoy/api/__init__.py delete mode 100644 src/envoy_data_plane/envoy/api/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/api/v2/auth/__init__.py delete mode 100644 src/envoy_data_plane/envoy/api/v2/cluster/__init__.py delete mode 100644 src/envoy_data_plane/envoy/api/v2/core/__init__.py delete mode 100644 src/envoy_data_plane/envoy/api/v2/endpoint/__init__.py delete mode 100644 src/envoy_data_plane/envoy/api/v2/listener/__init__.py delete mode 100644 
src/envoy_data_plane/envoy/api/v2/ratelimit/__init__.py delete mode 100644 src/envoy_data_plane/envoy/api/v2/route/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/accesslog/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/accesslog/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/accesslog/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/bootstrap/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/bootstrap/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/bootstrap/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/cluster/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/cluster/aggregate/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/cluster/aggregate/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/cluster/dynamic_forward_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/cluster/dynamic_forward_proxy/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/cluster/redis/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/cluster/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/common/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/common/dynamic_forward_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/common/dynamic_forward_proxy/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/common/key_value/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/common/key_value/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/common/matcher/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/common/matcher/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/common/mutation_rules/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/common/mutation_rules/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/common/tap/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/common/tap/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/core/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/core/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/endpoint/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/endpoint/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/accesslog/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/accesslog/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/dubbo/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/dubbo/router/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/dubbo/router/v2alpha1/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/fault/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/fault/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/adaptive_concurrency/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/adaptive_concurrency/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/aws_lambda/__init__.py delete mode 100644 
src/envoy_data_plane/envoy/config/filter/http/aws_lambda/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/aws_request_signing/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/aws_request_signing/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/buffer/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/buffer/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/cache/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/cache/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/compressor/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/compressor/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/cors/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/cors/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/csrf/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/csrf/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/dynamic_forward_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/dynamo/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/dynamo/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/ext_authz/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/ext_authz/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/fault/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/fault/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/grpc_http1_bridge/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/grpc_http1_bridge/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/grpc_http1_reverse_bridge/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/grpc_stats/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/grpc_stats/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/grpc_web/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/grpc_web/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/gzip/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/gzip/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/header_to_metadata/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/header_to_metadata/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/health_check/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/health_check/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/ip_tagging/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/ip_tagging/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/jwt_authn/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/jwt_authn/v2alpha/__init__.py delete mode 100644 
src/envoy_data_plane/envoy/config/filter/http/lua/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/lua/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/on_demand/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/on_demand/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/original_src/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/original_src/v2alpha1/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/rate_limit/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/rate_limit/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/rbac/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/rbac/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/router/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/router/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/squash/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/squash/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/tap/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/tap/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/transcoder/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/http/transcoder/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/listener/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/listener/http_inspector/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/listener/http_inspector/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/listener/original_dst/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/listener/original_dst/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/listener/original_src/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/listener/original_src/v2alpha1/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/listener/proxy_protocol/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/listener/proxy_protocol/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/listener/tls_inspector/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/listener/tls_inspector/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/client_ssl_auth/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/client_ssl_auth/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/direct_response/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/direct_response/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/dubbo_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/dubbo_proxy/v2alpha1/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/echo/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/echo/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/ext_authz/__init__.py delete mode 100644 
src/envoy_data_plane/envoy/config/filter/network/ext_authz/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/http_connection_manager/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/http_connection_manager/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/kafka_broker/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/kafka_broker/v2alpha1/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/local_rate_limit/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/local_rate_limit/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/mongo_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/mongo_proxy/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/mysql_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/mysql_proxy/v1alpha1/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/rate_limit/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/rate_limit/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/rbac/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/rbac/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/redis_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/redis_proxy/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/sni_cluster/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/sni_cluster/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/tcp_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/tcp_proxy/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/thrift_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/thrift_proxy/v2alpha1/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/zookeeper_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/network/zookeeper_proxy/v1alpha1/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/thrift/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/thrift/rate_limit/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/thrift/rate_limit/v2alpha1/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/thrift/router/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/thrift/router/v2alpha1/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/udp/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/udp/udp_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/filter/udp/udp_proxy/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/grpc_credential/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/grpc_credential/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/grpc_credential/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/health_checker/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/health_checker/redis/__init__.py delete mode 100644 
src/envoy_data_plane/envoy/config/health_checker/redis/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/listener/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/listener/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/listener/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/metrics/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/metrics/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/metrics/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/overload/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/overload/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/overload/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/ratelimit/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/ratelimit/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/ratelimit/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/rbac/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/rbac/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/rbac/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/resource_monitor/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/resource_monitor/fixed_heap/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/resource_monitor/fixed_heap/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/resource_monitor/injected_resource/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/resource_monitor/injected_resource/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/retry/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/retry/omit_canary_hosts/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/retry/omit_canary_hosts/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/retry/omit_host_metadata/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/retry/omit_host_metadata/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/retry/previous_hosts/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/retry/previous_hosts/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/retry/previous_priorities/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/route/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/route/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/tap/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/tap/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/trace/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/trace/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/trace/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/trace/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/transport_socket/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/transport_socket/alts/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/transport_socket/alts/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/transport_socket/raw_buffer/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/transport_socket/raw_buffer/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/config/transport_socket/tap/__init__.py delete mode 100644 
src/envoy_data_plane/envoy/config/transport_socket/tap/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/accesslog/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/accesslog/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/accesslog/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/cluster/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/cluster/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/cluster/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/core/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/core/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/core/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/dns/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/dns/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/dns/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/tap/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/tap/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/data/tap/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/access_loggers/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/access_loggers/file/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/access_loggers/file/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/access_loggers/filters/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/access_loggers/filters/cel/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/access_loggers/filters/cel/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/access_loggers/grpc/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/access_loggers/grpc/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/access_loggers/open_telemetry/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/access_loggers/open_telemetry/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/access_loggers/stream/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/access_loggers/stream/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/access_loggers/wasm/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/access_loggers/wasm/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/cache/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/cache/simple_http_cache/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/cache/simple_http_cache/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/clusters/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/clusters/aggregate/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/clusters/aggregate/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/clusters/dynamic_forward_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/clusters/dynamic_forward_proxy/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/clusters/redis/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/clusters/redis/v3/__init__.py delete mode 100644 
src/envoy_data_plane/envoy/extensions/common/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/common/dynamic_forward_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/common/dynamic_forward_proxy/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/common/matching/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/common/matching/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/common/ratelimit/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/common/ratelimit/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/common/tap/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/common/tap/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/compression/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/compression/brotli/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/compression/brotli/compressor/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/compression/brotli/compressor/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/compression/brotli/decompressor/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/compression/brotli/decompressor/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/compression/gzip/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/compression/gzip/compressor/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/compression/gzip/compressor/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/compression/gzip/decompressor/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/compression/gzip/decompressor/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/common/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/common/dependency/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/common/dependency/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/common/fault/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/common/fault/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/common/matcher/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/common/matcher/action/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/common/matcher/action/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/adaptive_concurrency/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/adaptive_concurrency/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/admission_control/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/admission_control/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/alternate_protocols_cache/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/alternate_protocols_cache/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/aws_lambda/__init__.py delete mode 100644 
src/envoy_data_plane/envoy/extensions/filters/http/aws_lambda/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/aws_request_signing/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/aws_request_signing/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/bandwidth_limit/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/bandwidth_limit/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/buffer/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/buffer/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/cache/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/cache/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/cdn_loop/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/cdn_loop/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/composite/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/composite/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/compressor/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/compressor/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/cors/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/cors/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/csrf/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/csrf/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/decompressor/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/decompressor/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/dynamic_forward_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/dynamic_forward_proxy/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/dynamo/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/dynamo/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/ext_authz/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/ext_authz/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/ext_proc/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/ext_proc/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/fault/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/fault/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/grpc_http1_bridge/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/grpc_http1_bridge/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/grpc_http1_reverse_bridge/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/grpc_json_transcoder/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/grpc_json_transcoder/v3/__init__.py delete mode 100644 
src/envoy_data_plane/envoy/extensions/filters/http/grpc_stats/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/grpc_stats/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/grpc_web/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/grpc_web/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/gzip/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/gzip/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/header_to_metadata/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/header_to_metadata/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/health_check/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/health_check/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/ip_tagging/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/ip_tagging/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/jwt_authn/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/jwt_authn/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/kill_request/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/kill_request/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/local_ratelimit/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/local_ratelimit/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/lua/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/lua/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/oauth2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/oauth2/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/on_demand/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/on_demand/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/original_src/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/original_src/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/ratelimit/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/ratelimit/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/rbac/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/rbac/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/router/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/router/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/set_metadata/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/set_metadata/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/stateful_session/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/stateful_session/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/tap/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/tap/v3/__init__.py delete 
mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/wasm/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/http/wasm/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/listener/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/listener/http_inspector/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/listener/http_inspector/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/listener/original_dst/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/listener/original_dst/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/listener/original_src/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/listener/original_src/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/listener/proxy_protocol/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/listener/proxy_protocol/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/listener/tls_inspector/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/listener/tls_inspector/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/client_ssl_auth/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/client_ssl_auth/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/connection_limit/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/connection_limit/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/direct_response/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/direct_response/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/dubbo_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/dubbo_proxy/router/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/dubbo_proxy/router/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/dubbo_proxy/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/echo/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/echo/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/ext_authz/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/ext_authz/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/http_connection_manager/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/http_connection_manager/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/local_ratelimit/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/local_ratelimit/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/matcher/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/matcher/action/__init__.py delete mode 100644 
src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/matcher/action/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/matcher/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/mongo_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/mongo_proxy/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/ratelimit/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/ratelimit/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/rbac/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/rbac/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/redis_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/redis_proxy/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/sni_cluster/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/sni_cluster/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/sni_dynamic_forward_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/tcp_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/tcp_proxy/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/header_to_metadata/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/header_to_metadata/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/router/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/router/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/wasm/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/wasm/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/zookeeper_proxy/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/network/zookeeper_proxy/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/udp/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/udp/dns_filter/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/udp/dns_filter/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/filters/udp/udp_proxy/__init__.py delete mode 100644 
src/envoy_data_plane/envoy/extensions/filters/udp/udp_proxy/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/formatter/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/formatter/metadata/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/formatter/metadata/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/formatter/req_without_query/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/formatter/req_without_query/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/health_checkers/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/health_checkers/redis/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/health_checkers/redis/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/http/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/http/header_formatters/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/http/header_formatters/preserve_case/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/http/header_formatters/preserve_case/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/http/original_ip_detection/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/http/original_ip_detection/custom_header/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/http/original_ip_detection/custom_header/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/http/original_ip_detection/xff/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/http/original_ip_detection/xff/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/http/stateful_session/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/http/stateful_session/cookie/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/http/stateful_session/cookie/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/internal_redirect/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/internal_redirect/allow_listed_routes/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/internal_redirect/allow_listed_routes/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/internal_redirect/previous_routes/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/internal_redirect/previous_routes/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/internal_redirect/safe_cross_scheme/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/internal_redirect/safe_cross_scheme/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/key_value/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/key_value/file_based/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/key_value/file_based/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/matching/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/matching/common_inputs/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/matching/common_inputs/environment_variable/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/matching/common_inputs/environment_variable/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/matching/input_matchers/__init__.py delete mode 100644 
src/envoy_data_plane/envoy/extensions/matching/input_matchers/consistent_hashing/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/matching/input_matchers/consistent_hashing/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/matching/input_matchers/ip/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/matching/input_matchers/ip/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/network/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/network/dns_resolver/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/network/dns_resolver/apple/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/network/dns_resolver/apple/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/network/dns_resolver/cares/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/network/dns_resolver/cares/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/network/socket_interface/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/network/socket_interface/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/quic/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/quic/crypto_stream/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/quic/crypto_stream/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/quic/proof_source/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/quic/proof_source/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/rate_limit_descriptors/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/rate_limit_descriptors/expr/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/rate_limit_descriptors/expr/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/rbac/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/rbac/matchers/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/rbac/matchers/upstream_ip_port/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/rbac/matchers/upstream_ip_port/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/request_id/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/request_id/uuid/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/request_id/uuid/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/resource_monitors/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/resource_monitors/fixed_heap/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/resource_monitors/fixed_heap/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/resource_monitors/injected_resource/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/resource_monitors/injected_resource/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/retry/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/retry/host/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/retry/host/omit_canary_hosts/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/retry/host/omit_canary_hosts/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/retry/host/omit_host_metadata/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/retry/host/omit_host_metadata/v3/__init__.py delete mode 
100644 src/envoy_data_plane/envoy/extensions/retry/host/previous_hosts/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/retry/host/previous_hosts/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/retry/priority/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/retry/priority/previous_priorities/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/retry/priority/previous_priorities/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/stat_sinks/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/stat_sinks/graphite_statsd/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/stat_sinks/graphite_statsd/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/stat_sinks/wasm/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/stat_sinks/wasm/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/alts/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/alts/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/proxy_protocol/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/proxy_protocol/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/quic/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/quic/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/raw_buffer/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/raw_buffer/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/s2a/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/s2a/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/starttls/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/starttls/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/tap/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/tap/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/tcp_stats/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/tcp_stats/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/tls/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/transport_sockets/tls/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/upstreams/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/upstreams/http/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/upstreams/http/generic/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/upstreams/http/generic/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/upstreams/http/http/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/upstreams/http/http/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/upstreams/http/tcp/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/upstreams/http/tcp/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/upstreams/http/v3/__init__.py delete mode 100644 
src/envoy_data_plane/envoy/extensions/upstreams/tcp/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/upstreams/tcp/generic/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/upstreams/tcp/generic/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/wasm/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/wasm/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/watchdog/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/watchdog/profile_action/__init__.py delete mode 100644 src/envoy_data_plane/envoy/extensions/watchdog/profile_action/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/accesslog/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/accesslog/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/accesslog/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/auth/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/auth/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/auth/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/auth/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/cluster/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/cluster/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/discovery/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/discovery/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/discovery/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/endpoint/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/endpoint/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/event_reporting/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/event_reporting/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/event_reporting/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/ext_proc/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/ext_proc/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/extension/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/extension/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/health/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/health/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/listener/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/listener/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/load_stats/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/load_stats/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/load_stats/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/metrics/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/metrics/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/metrics/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/ratelimit/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/ratelimit/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/ratelimit/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/route/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/route/v3/__init__.py delete mode 100644 
src/envoy_data_plane/envoy/service/runtime/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/runtime/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/secret/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/secret/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/status/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/status/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/status/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/tap/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/tap/v2alpha/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/tap/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/trace/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/trace/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/service/trace/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/type/__init__.py delete mode 100644 src/envoy_data_plane/envoy/type/http/__init__.py delete mode 100644 src/envoy_data_plane/envoy/type/http/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/type/matcher/__init__.py delete mode 100644 src/envoy_data_plane/envoy/type/matcher/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/type/metadata/__init__.py delete mode 100644 src/envoy_data_plane/envoy/type/metadata/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/type/metadata/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/type/tracing/__init__.py delete mode 100644 src/envoy_data_plane/envoy/type/tracing/v2/__init__.py delete mode 100644 src/envoy_data_plane/envoy/type/tracing/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/type/v3/__init__.py delete mode 100644 src/envoy_data_plane/envoy/watchdog/__init__.py delete mode 100644 src/envoy_data_plane/envoy/watchdog/v3/__init__.py delete mode 100644 src/envoy_data_plane/google/__init__.py delete mode 100644 src/envoy_data_plane/google/api/__init__.py delete mode 100644 src/envoy_data_plane/google/api/expr/__init__.py delete mode 100644 src/envoy_data_plane/google/api/expr/v1alpha1/__init__.py delete mode 100644 src/envoy_data_plane/google/rpc/__init__.py delete mode 100644 src/envoy_data_plane/io/__init__.py delete mode 100644 src/envoy_data_plane/io/prometheus/__init__.py delete mode 100644 src/envoy_data_plane/io/prometheus/client/__init__.py delete mode 100644 src/envoy_data_plane/opencensus/__init__.py delete mode 100644 src/envoy_data_plane/opencensus/proto/__init__.py delete mode 100644 src/envoy_data_plane/opencensus/proto/resource/__init__.py delete mode 100644 src/envoy_data_plane/opencensus/proto/resource/v1/__init__.py delete mode 100644 src/envoy_data_plane/opencensus/proto/trace/__init__.py delete mode 100644 src/envoy_data_plane/opencensus/proto/trace/v1/__init__.py delete mode 100644 src/envoy_data_plane/opentelemetry/__init__.py delete mode 100644 src/envoy_data_plane/opentelemetry/proto/__init__.py delete mode 100644 src/envoy_data_plane/opentelemetry/proto/common/__init__.py delete mode 100644 src/envoy_data_plane/opentelemetry/proto/common/v1/__init__.py delete mode 100644 src/envoy_data_plane/udpa/__init__.py delete mode 100644 src/envoy_data_plane/udpa/annotations/__init__.py delete mode 100644 src/envoy_data_plane/validate/__init__.py delete mode 100644 src/envoy_data_plane/xds/__init__.py delete mode 100644 src/envoy_data_plane/xds/annotations/__init__.py delete mode 100644 
src/envoy_data_plane/xds/annotations/v3/__init__.py delete mode 100644 src/envoy_data_plane/xds/core/__init__.py delete mode 100644 src/envoy_data_plane/xds/core/v3/__init__.py delete mode 100644 src/envoy_data_plane/xds/type/__init__.py delete mode 100644 src/envoy_data_plane/xds/type/matcher/__init__.py delete mode 100644 src/envoy_data_plane/xds/type/matcher/v3/__init__.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 5ff8058..2aa6db4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ -0.2.6 - 23 July 2021 --------------------- +0.4.0 - 11 Nov 2022 +------------------- +* Updated protobufs to Envoy 1.24.0 + +0.3.0 - 9 Feb 2022 +------------------- +* Updated protobufs to Envoy 1.21.0 + +0.2.6 - 23 Jul 2021 +------------------- * Added prometheus to protoc script * Made protoc script compile all protos at the same time, since this appears to work now * Added github actions workflows diff --git a/poetry.lock b/poetry.lock index 8f580f9..8b88dc9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,6 +1,6 @@ [[package]] name = "atomicwrites" -version = "1.4.0" +version = "1.4.1" description = "Atomic file writes." category = "dev" optional = false @@ -8,21 +8,21 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "attrs" -version = "21.4.0" +version = "22.1.0" description = "Classes Without Boilerplate" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.5" [package.extras] -dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"] -docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] -tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "cloudpickle"] -tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "cloudpickle"] +dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"] +docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"] +tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"] +tests_no_zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"] [[package]] name = "betterproto" -version = "2.0.0b4" +version = "2.0.0b5" description = "A better Protobuf / gRPC generator & library" category = "main" optional = false @@ -31,26 +31,27 @@ python-versions = ">=3.6.2,<4.0" [package.dependencies] black = {version = ">=19.3b0", optional = true, markers = "extra == \"compiler\""} grpclib = ">=0.4.1,<0.5.0" -jinja2 = {version = ">=2.11.2,<3.0.0", optional = true, markers = "extra == \"compiler\""} +isort = {version = ">=5.10.1,<6.0.0", optional = true, markers = "extra == \"compiler\""} +jinja2 = {version = ">=3.0.3", optional = true, markers = "extra == \"compiler\""} python-dateutil = ">=2.8,<3.0" [package.extras] -compiler = ["black (>=19.3b0)", "jinja2 (>=2.11.2,<3.0.0)"] +compiler = ["black (>=19.3b0)", "isort (>=5.10.1,<6.0.0)", "jinja2 (>=3.0.3)"] [[package]] name = "black" -version = 
"22.1.0" +version = "22.10.0" description = "The uncompromising code formatter." category = "dev" optional = false -python-versions = ">=3.6.2" +python-versions = ">=3.7" [package.dependencies] click = ">=8.0.0" mypy-extensions = ">=0.4.3" pathspec = ">=0.9.0" platformdirs = ">=2" -tomli = ">=1.1.0" +tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""} typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""} typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} @@ -62,30 +63,30 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "certifi" -version = "2021.10.8" +version = "2022.9.24" description = "Python package for providing Mozilla's CA Bundle." category = "dev" optional = false -python-versions = "*" +python-versions = ">=3.6" [[package]] name = "charset-normalizer" -version = "2.0.11" +version = "2.1.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." category = "dev" optional = false -python-versions = ">=3.5.0" +python-versions = ">=3.6.0" [package.extras] unicode_backport = ["unicodedata2"] [[package]] name = "click" -version = "8.0.3" +version = "8.1.3" description = "Composable command line interface toolkit" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} @@ -93,50 +94,54 @@ importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [[package]] name = "colorama" -version = "0.4.4" +version = "0.4.6" description = "Cross-platform colored terminal text." category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" [[package]] name = "grpcio" -version = "1.43.0" +version = "1.50.0" description = "HTTP/2-based RPC framework" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] six = ">=1.5.2" [package.extras] -protobuf = ["grpcio-tools (>=1.43.0)"] +protobuf = ["grpcio-tools (>=1.50.0)"] [[package]] name = "grpcio-tools" -version = "1.43.0" +version = "1.48.2" description = "Protobuf code generator for gRPC" category = "main" optional = false python-versions = ">=3.6" [package.dependencies] -grpcio = ">=1.43.0" -protobuf = ">=3.5.0.post1,<4.0dev" +grpcio = ">=1.48.2" +protobuf = ">=3.12.0,<4.0dev" +setuptools = "*" [[package]] name = "grpclib" -version = "0.4.2" +version = "0.4.3" description = "Pure-Python gRPC implementation for asyncio" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] h2 = ">=3.1.0,<5" multidict = "*" +[package.extras] +protobuf = ["protobuf (>=3.15.0)"] + [[package]] name = "h2" version = "4.1.0" @@ -167,7 +172,7 @@ python-versions = ">=3.6.1" [[package]] name = "idna" -version = "3.3" +version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" category = "dev" optional = false @@ -175,7 +180,7 @@ python-versions = ">=3.5" [[package]] name = "importlib-metadata" -version = "4.10.1" +version = "5.0.0" description = "Read metadata from Python packages" category = "dev" optional = false @@ -186,9 +191,9 @@ typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} zipp = ">=0.5" [package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", 
"rst.linker (>=1.9)"] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] perf = ["ipython"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] +testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] [[package]] name = "iniconfig" @@ -199,26 +204,40 @@ optional = false python-versions = "*" [[package]] -name = "jinja2" -version = "2.11.3" +name = "isort" +version = "5.10.1" +description = "A Python utility / library to sort Python imports." +category = "dev" +optional = false +python-versions = ">=3.6.1,<4.0" + +[package.extras] +colors = ["colorama (>=0.4.3,<0.5.0)"] +pipfile_deprecated_finder = ["pipreqs", "requirementslib"] +plugins = ["setuptools"] +requirements_deprecated_finder = ["pip-api", "pipreqs"] + +[[package]] +name = "Jinja2" +version = "3.1.2" description = "A very fast and expressive template engine." category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.7" [package.dependencies] -MarkupSafe = ">=0.23" +MarkupSafe = ">=2.0" [package.extras] -i18n = ["Babel (>=0.8)"] +i18n = ["Babel (>=2.7)"] [[package]] -name = "markupsafe" -version = "2.0.1" +name = "MarkupSafe" +version = "2.1.1" description = "Safely add untrusted strings to HTML/XML markup." category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [[package]] name = "multidict" @@ -249,23 +268,23 @@ pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" [[package]] name = "pathspec" -version = "0.9.0" +version = "0.10.1" description = "Utility library for gitignore style pattern matching of file paths." category = "dev" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +python-versions = ">=3.7" [[package]] name = "platformdirs" -version = "2.4.1" -description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +version = "2.5.3" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
category = "dev" optional = false python-versions = ">=3.7" [package.extras] -docs = ["Sphinx (>=4)", "furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)"] -test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)"] +docs = ["furo (>=2022.9.29)", "proselint (>=0.13)", "sphinx (>=5.3)", "sphinx-autodoc-typehints (>=1.19.4)"] +test = ["appdirs (==1.4.4)", "pytest (>=7.2)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] [[package]] name = "pluggy" @@ -284,11 +303,11 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "protobuf" -version = "3.19.4" +version = "3.20.3" description = "Protocol Buffers" category = "main" optional = false -python-versions = ">=3.5" +python-versions = ">=3.7" [[package]] name = "py" @@ -300,11 +319,11 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "pyparsing" -version = "3.0.7" -description = "Python parsing module" +version = "3.0.9" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.6.8" [package.extras] diagrams = ["jinja2", "railroad-diagrams"] @@ -355,21 +374,34 @@ six = ">=1.5" [[package]] name = "requests" -version = "2.27.1" +version = "2.28.1" description = "Python HTTP for Humans." category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +python-versions = ">=3.7, <4" [package.dependencies] certifi = ">=2017.4.17" -charset-normalizer = {version = ">=2.0.0,<2.1.0", markers = "python_version >= \"3\""} -idna = {version = ">=2.5,<4", markers = "python_version >= \"3\""} +charset-normalizer = ">=2,<3" +idna = ">=2.5,<4" urllib3 = ">=1.21.1,<1.27" [package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"] -use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use_chardet_on_py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "setuptools" +version = "65.5.1" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "six" @@ -391,9 +423,9 @@ python-versions = ">=3.6" typing-extensions = {version = "*", markers = "python_version < \"3.8\""} [package.extras] -dev = ["pre-commit", "rich", "cogapp", "tomli", "coverage", "freezegun (>=0.2.8)", "pretend", "pytest-asyncio", "pytest (>=6.0)", "simplejson", "furo", "sphinx", "sphinx-notfound-page", 
"sphinxcontrib-mermaid", "twisted"] +dev = ["cogapp", "coverage[toml]", "freezegun (>=0.2.8)", "furo", "pre-commit", "pretend", "pytest (>=6.0)", "pytest-asyncio", "rich", "simplejson", "sphinx", "sphinx-notfound-page", "sphinxcontrib-mermaid", "tomli", "twisted"] docs = ["furo", "sphinx", "sphinx-notfound-page", "sphinxcontrib-mermaid", "twisted"] -tests = ["coverage", "freezegun (>=0.2.8)", "pretend", "pytest-asyncio", "pytest (>=6.0)", "simplejson"] +tests = ["coverage[toml]", "freezegun (>=0.2.8)", "pretend", "pytest (>=6.0)", "pytest-asyncio", "simplejson"] [[package]] name = "toml" @@ -413,7 +445,7 @@ python-versions = ">=3.7" [[package]] name = "typed-ast" -version = "1.5.2" +version = "1.5.4" description = "a fork of Python 2 and 3 ast modules with type comment support" category = "dev" optional = false @@ -421,190 +453,190 @@ python-versions = ">=3.6" [[package]] name = "typing-extensions" -version = "4.0.1" -description = "Backported and Experimental Type Hints for Python 3.6+" +version = "4.4.0" +description = "Backported and Experimental Type Hints for Python 3.7+" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [[package]] name = "urllib3" -version = "1.26.8" +version = "1.26.12" description = "HTTP library with thread-safe connection pooling, file post, and more." category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4" [package.extras] -brotli = ["brotlipy (>=0.6.0)"] -secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "zipp" -version = "3.7.0" +version = "3.10.0" description = "Backport of pathlib-compatible object wrapper for zip files" category = "dev" optional = false python-versions = ">=3.7" [package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] +testing = ["flake8 (<5)", "func-timeout", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [metadata] lock-version = "1.1" python-versions = "^3.7" -content-hash = "71d962eb2955fc82e32fc4299d2f32f150d1e625edd96c8b728dd13a3541d4c0" +content-hash = "2e33db475e3a679a7bfa27111900ca4c28cf4716de73c2b62f4471cebdb828e0" [metadata.files] atomicwrites = [ - {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, - {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, + {file = "atomicwrites-1.4.1.tar.gz", hash = "sha256:81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11"}, ] attrs = [ - {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"}, - 
{file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"}, + {file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"}, + {file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"}, ] betterproto = [ - {file = "betterproto-2.0.0b4-py3-none-any.whl", hash = "sha256:6b807038df17a7896cc1f98b42f64eed24c2f350b6d10b2854501f8b9b7d3d1e"}, - {file = "betterproto-2.0.0b4.tar.gz", hash = "sha256:99bc6f866fe9c30100fe438662439205f35bc0e65e4e736c46a6ebfea02c3e7b"}, + {file = "betterproto-2.0.0b5-py3-none-any.whl", hash = "sha256:d3e6115c7d5136f1d5974e565b7560273f66b43065e74218e472321ee1258f4c"}, + {file = "betterproto-2.0.0b5.tar.gz", hash = "sha256:00a301c70a2db4d3cdd2b261522ae1d34972fb04b655a154d67daaaf4131102e"}, ] black = [ - {file = "black-22.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1297c63b9e1b96a3d0da2d85d11cd9bf8664251fd69ddac068b98dc4f34f73b6"}, - {file = "black-22.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2ff96450d3ad9ea499fc4c60e425a1439c2120cbbc1ab959ff20f7c76ec7e866"}, - {file = "black-22.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e21e1f1efa65a50e3960edd068b6ae6d64ad6235bd8bfea116a03b21836af71"}, - {file = "black-22.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2f69158a7d120fd641d1fa9a921d898e20d52e44a74a6fbbcc570a62a6bc8ab"}, - {file = "black-22.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:228b5ae2c8e3d6227e4bde5920d2fc66cc3400fde7bcc74f480cb07ef0b570d5"}, - {file = "black-22.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b1a5ed73ab4c482208d20434f700d514f66ffe2840f63a6252ecc43a9bc77e8a"}, - {file = "black-22.1.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35944b7100af4a985abfcaa860b06af15590deb1f392f06c8683b4381e8eeaf0"}, - {file = "black-22.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7835fee5238fc0a0baf6c9268fb816b5f5cd9b8793423a75e8cd663c48d073ba"}, - {file = "black-22.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dae63f2dbf82882fa3b2a3c49c32bffe144970a573cd68d247af6560fc493ae1"}, - {file = "black-22.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fa1db02410b1924b6749c245ab38d30621564e658297484952f3d8a39fce7e8"}, - {file = "black-22.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c8226f50b8c34a14608b848dc23a46e5d08397d009446353dad45e04af0c8e28"}, - {file = "black-22.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2d6f331c02f0f40aa51a22e479c8209d37fcd520c77721c034517d44eecf5912"}, - {file = "black-22.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:742ce9af3086e5bd07e58c8feb09dbb2b047b7f566eb5f5bc63fd455814979f3"}, - {file = "black-22.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fdb8754b453fb15fad3f72cd9cad3e16776f0964d67cf30ebcbf10327a3777a3"}, - {file = "black-22.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5660feab44c2e3cb24b2419b998846cbb01c23c7fe645fee45087efa3da2d61"}, - {file = "black-22.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:6f2f01381f91c1efb1451998bd65a129b3ed6f64f79663a55fe0e9b74a5f81fd"}, - {file = "black-22.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:efbadd9b52c060a8fc3b9658744091cb33c31f830b3f074422ed27bad2b18e8f"}, - {file = "black-22.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8871fcb4b447206904932b54b567923e5be802b9b19b744fdff092bd2f3118d0"}, - {file = 
"black-22.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ccad888050f5393f0d6029deea2a33e5ae371fd182a697313bdbd835d3edaf9c"}, - {file = "black-22.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07e5c049442d7ca1a2fc273c79d1aecbbf1bc858f62e8184abe1ad175c4f7cc2"}, - {file = "black-22.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:373922fc66676133ddc3e754e4509196a8c392fec3f5ca4486673e685a421321"}, - {file = "black-22.1.0-py3-none-any.whl", hash = "sha256:3524739d76b6b3ed1132422bf9d82123cd1705086723bc3e235ca39fd21c667d"}, - {file = "black-22.1.0.tar.gz", hash = "sha256:a7c0192d35635f6fc1174be575cb7915e92e5dd629ee79fdaf0dcfa41a80afb5"}, + {file = "black-22.10.0-1fixedarch-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:5cc42ca67989e9c3cf859e84c2bf014f6633db63d1cbdf8fdb666dcd9e77e3fa"}, + {file = "black-22.10.0-1fixedarch-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:5d8f74030e67087b219b032aa33a919fae8806d49c867846bfacde57f43972ef"}, + {file = "black-22.10.0-1fixedarch-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:197df8509263b0b8614e1df1756b1dd41be6738eed2ba9e9769f3880c2b9d7b6"}, + {file = "black-22.10.0-1fixedarch-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:2644b5d63633702bc2c5f3754b1b475378fbbfb481f62319388235d0cd104c2d"}, + {file = "black-22.10.0-1fixedarch-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:e41a86c6c650bcecc6633ee3180d80a025db041a8e2398dcc059b3afa8382cd4"}, + {file = "black-22.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2039230db3c6c639bd84efe3292ec7b06e9214a2992cd9beb293d639c6402edb"}, + {file = "black-22.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14ff67aec0a47c424bc99b71005202045dc09270da44a27848d534600ac64fc7"}, + {file = "black-22.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:819dc789f4498ecc91438a7de64427c73b45035e2e3680c92e18795a839ebb66"}, + {file = "black-22.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5b9b29da4f564ba8787c119f37d174f2b69cdfdf9015b7d8c5c16121ddc054ae"}, + {file = "black-22.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8b49776299fece66bffaafe357d929ca9451450f5466e997a7285ab0fe28e3b"}, + {file = "black-22.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:21199526696b8f09c3997e2b4db8d0b108d801a348414264d2eb8eb2532e540d"}, + {file = "black-22.10.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e464456d24e23d11fced2bc8c47ef66d471f845c7b7a42f3bd77bf3d1789650"}, + {file = "black-22.10.0-cp37-cp37m-win_amd64.whl", hash = "sha256:9311e99228ae10023300ecac05be5a296f60d2fd10fff31cf5c1fa4ca4b1988d"}, + {file = "black-22.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fba8a281e570adafb79f7755ac8721b6cf1bbf691186a287e990c7929c7692ff"}, + {file = "black-22.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:915ace4ff03fdfff953962fa672d44be269deb2eaf88499a0f8805221bc68c87"}, + {file = "black-22.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:444ebfb4e441254e87bad00c661fe32df9969b2bf224373a448d8aca2132b395"}, + {file = "black-22.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:974308c58d057a651d182208a484ce80a26dac0caef2895836a92dd6ebd725e0"}, + {file = "black-22.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72ef3925f30e12a184889aac03d77d031056860ccae8a1e519f6cbb742736383"}, + {file = "black-22.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:432247333090c8c5366e69627ccb363bc58514ae3e63f7fc75c54b1ea80fa7de"}, + {file = 
"black-22.10.0-py3-none-any.whl", hash = "sha256:c957b2b4ea88587b46cf49d1dc17681c1e672864fd7af32fc1e9664d572b3458"}, + {file = "black-22.10.0.tar.gz", hash = "sha256:f513588da599943e0cde4e32cc9879e825d58720d6557062d1098c5ad80080e1"}, ] certifi = [ - {file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"}, - {file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"}, + {file = "certifi-2022.9.24-py3-none-any.whl", hash = "sha256:90c1a32f1d68f940488354e36370f6cca89f0f106db09518524c88d6ed83f382"}, + {file = "certifi-2022.9.24.tar.gz", hash = "sha256:0d9c601124e5a6ba9712dbc60d9c53c21e34f5f641fe83002317394311bdce14"}, ] charset-normalizer = [ - {file = "charset-normalizer-2.0.11.tar.gz", hash = "sha256:98398a9d69ee80548c762ba991a4728bfc3836768ed226b3945908d1a688371c"}, - {file = "charset_normalizer-2.0.11-py3-none-any.whl", hash = "sha256:2842d8f5e82a1f6aa437380934d5e1cd4fcf2003b06fed6940769c164a480a45"}, + {file = "charset-normalizer-2.1.1.tar.gz", hash = "sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845"}, + {file = "charset_normalizer-2.1.1-py3-none-any.whl", hash = "sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f"}, ] click = [ - {file = "click-8.0.3-py3-none-any.whl", hash = "sha256:353f466495adaeb40b6b5f592f9f91cb22372351c84caeb068132442a4518ef3"}, - {file = "click-8.0.3.tar.gz", hash = "sha256:410e932b050f5eed773c4cda94de75971c89cdb3155a72a0831139a79e5ecb5b"}, + {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, + {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, ] colorama = [ - {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, - {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] grpcio = [ - {file = "grpcio-1.43.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:a4e786a8ee8b30b25d70ee52cda6d1dbba2a8ca2f1208d8e20ed8280774f15c8"}, - {file = "grpcio-1.43.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:af9c3742f6c13575c0d4147a8454da0ff5308c4d9469462ff18402c6416942fe"}, - {file = "grpcio-1.43.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:fdac966699707b5554b815acc272d81e619dd0999f187cd52a61aef075f870ee"}, - {file = "grpcio-1.43.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e463b4aa0a6b31cf2e57c4abc1a1b53531a18a570baeed39d8d7b65deb16b7e"}, - {file = "grpcio-1.43.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f11d05402e0ac3a284443d8a432d3dfc76a6bd3f7b5858cddd75617af2d7bd9b"}, - {file = "grpcio-1.43.0-cp310-cp310-win32.whl", hash = "sha256:c36f418c925a41fccada8f7ae9a3d3e227bfa837ddbfddd3d8b0ac252d12dda9"}, - {file = "grpcio-1.43.0-cp310-cp310-win_amd64.whl", hash = "sha256:772b943f34374744f70236bbbe0afe413ed80f9ae6303503f85e2b421d4bca92"}, - {file = "grpcio-1.43.0-cp36-cp36m-linux_armv7l.whl", hash = "sha256:cbc9b83211d905859dcf234ad39d7193ff0f05bfc3269c364fb0d114ee71de59"}, - {file = 
"grpcio-1.43.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:fb7229fa2a201a0c377ff3283174ec966da8f9fd7ffcc9a92f162d2e7fc9025b"}, - {file = "grpcio-1.43.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:17b75f220ee6923338155b4fcef4c38802b9a57bc57d112c9599a13a03e99f8d"}, - {file = "grpcio-1.43.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:6620a5b751b099b3b25553cfc03dfcd873cda06f9bb2ff7e9948ac7090e20f05"}, - {file = "grpcio-1.43.0-cp36-cp36m-manylinux_2_17_aarch64.whl", hash = "sha256:1898f999383baac5fcdbdef8ea5b1ef204f38dc211014eb6977ac6e55944d738"}, - {file = "grpcio-1.43.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47b6821238d8978014d23b1132713dac6c2d72cbb561cf257608b1673894f90a"}, - {file = "grpcio-1.43.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80398e9fb598060fa41050d1220f5a2440fe74ff082c36dda41ac3215ebb5ddd"}, - {file = "grpcio-1.43.0-cp36-cp36m-win32.whl", hash = "sha256:0110310eff07bb69782f53b7a947490268c4645de559034c43c0a635612e250f"}, - {file = "grpcio-1.43.0-cp36-cp36m-win_amd64.whl", hash = "sha256:45401d00f2ee46bde75618bf33e9df960daa7980e6e0e7328047191918c98504"}, - {file = "grpcio-1.43.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:af78ac55933811e6a25141336b1f2d5e0659c2f568d44d20539b273792563ca7"}, - {file = "grpcio-1.43.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8b2b9dc4d7897566723b77422e11c009a0ebd397966b165b21b89a62891a9fdf"}, - {file = "grpcio-1.43.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:77ef653f966934b3bfdd00e4f2064b68880eb40cf09b0b99edfa5ee22a44f559"}, - {file = "grpcio-1.43.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:e95b5d62ec26d0cd0b90c202d73e7cb927c369c3358e027225239a4e354967dc"}, - {file = "grpcio-1.43.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:04239e8f71db832c26bbbedb4537b37550a39d77681d748ab4678e58dd6455d6"}, - {file = "grpcio-1.43.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b4a7152187a49767a47d1413edde2304c96f41f7bc92cc512e230dfd0fba095"}, - {file = "grpcio-1.43.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8cc936a29c65ab39714e1ba67a694c41218f98b6e2a64efb83f04d9abc4386b"}, - {file = "grpcio-1.43.0-cp37-cp37m-win32.whl", hash = "sha256:577e024c8dd5f27cd98ba850bc4e890f07d4b5942e5bc059a3d88843a2f48f66"}, - {file = "grpcio-1.43.0-cp37-cp37m-win_amd64.whl", hash = "sha256:138f57e3445d4a48d9a8a5af1538fdaafaa50a0a3c243f281d8df0edf221dc02"}, - {file = "grpcio-1.43.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:08cf25f2936629db062aeddbb594bd76b3383ab0ede75ef0461a3b0bc3a2c150"}, - {file = "grpcio-1.43.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:01f4b887ed703fe82ebe613e1d2dadea517891725e17e7a6134dcd00352bd28c"}, - {file = "grpcio-1.43.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:0aa8285f284338eb68962fe1a830291db06f366ea12f213399b520c062b01f65"}, - {file = "grpcio-1.43.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:0edbfeb6729aa9da33ce7e28fb7703b3754934115454ae45e8cc1db601756fd3"}, - {file = "grpcio-1.43.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:c354017819201053d65212befd1dcb65c2d91b704d8977e696bae79c47cd2f82"}, - {file = "grpcio-1.43.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50cfb7e1067ee5e00b8ab100a6b7ea322d37ec6672c0455106520b5891c4b5f5"}, - {file = "grpcio-1.43.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57f1aeb65ed17dfb2f6cd717cc109910fe395133af7257a9c729c0b9604eac10"}, - {file = 
"grpcio-1.43.0-cp38-cp38-win32.whl", hash = "sha256:fa26a8bbb3fe57845acb1329ff700d5c7eaf06414c3e15f4cb8923f3a466ef64"}, - {file = "grpcio-1.43.0-cp38-cp38-win_amd64.whl", hash = "sha256:ade8b79a6b6aea68adb9d4bfeba5d647667d842202c5d8f3ba37ac1dc8e5c09c"}, - {file = "grpcio-1.43.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:124e718faf96fe44c98b05f3f475076be8b5198bb4c52a13208acf88a8548ba9"}, - {file = "grpcio-1.43.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2f96142d0abc91290a63ba203f01649e498302b1b6007c67bad17f823ecde0cf"}, - {file = "grpcio-1.43.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:31e6e489ccd8f08884b9349a39610982df48535881ec34f05a11c6e6b6ebf9d0"}, - {file = "grpcio-1.43.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:0e731f660e1e68238f56f4ce11156f02fd06dc58bc7834778d42c0081d4ef5ad"}, - {file = "grpcio-1.43.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:1f16725a320460435a8a5339d8b06c4e00d307ab5ad56746af2e22b5f9c50932"}, - {file = "grpcio-1.43.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4b4543e13acb4806917d883d0f70f21ba93b29672ea81f4aaba14821aaf9bb0"}, - {file = "grpcio-1.43.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:594aaa0469f4fca7773e80d8c27bf1298e7bbce5f6da0f084b07489a708f16ab"}, - {file = "grpcio-1.43.0-cp39-cp39-win32.whl", hash = "sha256:5449ae564349e7a738b8c38583c0aad954b0d5d1dd3cea68953bfc32eaee11e3"}, - {file = "grpcio-1.43.0-cp39-cp39-win_amd64.whl", hash = "sha256:bdf41550815a831384d21a498b20597417fd31bd084deb17d31ceb39ad9acc79"}, - {file = "grpcio-1.43.0.tar.gz", hash = "sha256:735d9a437c262ab039d02defddcb9f8f545d7009ae61c0114e19dda3843febe5"}, + {file = "grpcio-1.50.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:906f4d1beb83b3496be91684c47a5d870ee628715227d5d7c54b04a8de802974"}, + {file = "grpcio-1.50.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:2d9fd6e38b16c4d286a01e1776fdf6c7a4123d99ae8d6b3f0b4a03a34bf6ce45"}, + {file = "grpcio-1.50.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:4b123fbb7a777a2fedec684ca0b723d85e1d2379b6032a9a9b7851829ed3ca9a"}, + {file = "grpcio-1.50.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2f77a90ba7b85bfb31329f8eab9d9540da2cf8a302128fb1241d7ea239a5469"}, + {file = "grpcio-1.50.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eea18a878cffc804506d39c6682d71f6b42ec1c151d21865a95fae743fda500"}, + {file = "grpcio-1.50.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b71916fa8f9eb2abd93151fafe12e18cebb302686b924bd4ec39266211da525"}, + {file = "grpcio-1.50.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:95ce51f7a09491fb3da8cf3935005bff19983b77c4e9437ef77235d787b06842"}, + {file = "grpcio-1.50.0-cp310-cp310-win32.whl", hash = "sha256:f7025930039a011ed7d7e7ef95a1cb5f516e23c5a6ecc7947259b67bea8e06ca"}, + {file = "grpcio-1.50.0-cp310-cp310-win_amd64.whl", hash = "sha256:05f7c248e440f538aaad13eee78ef35f0541e73498dd6f832fe284542ac4b298"}, + {file = "grpcio-1.50.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:ca8a2254ab88482936ce941485c1c20cdeaef0efa71a61dbad171ab6758ec998"}, + {file = "grpcio-1.50.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3b611b3de3dfd2c47549ca01abfa9bbb95937eb0ea546ea1d762a335739887be"}, + {file = "grpcio-1.50.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a4cd8cb09d1bc70b3ea37802be484c5ae5a576108bad14728f2516279165dd7"}, + {file = "grpcio-1.50.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:156f8009e36780fab48c979c5605eda646065d4695deea4cfcbcfdd06627ddb6"}, + {file = "grpcio-1.50.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de411d2b030134b642c092e986d21aefb9d26a28bf5a18c47dd08ded411a3bc5"}, + {file = "grpcio-1.50.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d144ad10eeca4c1d1ce930faa105899f86f5d99cecfe0d7224f3c4c76265c15e"}, + {file = "grpcio-1.50.0-cp311-cp311-win32.whl", hash = "sha256:92d7635d1059d40d2ec29c8bf5ec58900120b3ce5150ef7414119430a4b2dd5c"}, + {file = "grpcio-1.50.0-cp311-cp311-win_amd64.whl", hash = "sha256:ce8513aee0af9c159319692bfbf488b718d1793d764798c3d5cff827a09e25ef"}, + {file = "grpcio-1.50.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:8e8999a097ad89b30d584c034929f7c0be280cd7851ac23e9067111167dcbf55"}, + {file = "grpcio-1.50.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:a50a1be449b9e238b9bd43d3857d40edf65df9416dea988929891d92a9f8a778"}, + {file = "grpcio-1.50.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:cf151f97f5f381163912e8952eb5b3afe89dec9ed723d1561d59cabf1e219a35"}, + {file = "grpcio-1.50.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a23d47f2fc7111869f0ff547f771733661ff2818562b04b9ed674fa208e261f4"}, + {file = "grpcio-1.50.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d84d04dec64cc4ed726d07c5d17b73c343c8ddcd6b59c7199c801d6bbb9d9ed1"}, + {file = "grpcio-1.50.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:67dd41a31f6fc5c7db097a5c14a3fa588af54736ffc174af4411d34c4f306f68"}, + {file = "grpcio-1.50.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8d4c8e73bf20fb53fe5a7318e768b9734cf122fe671fcce75654b98ba12dfb75"}, + {file = "grpcio-1.50.0-cp37-cp37m-win32.whl", hash = "sha256:7489dbb901f4fdf7aec8d3753eadd40839c9085967737606d2c35b43074eea24"}, + {file = "grpcio-1.50.0-cp37-cp37m-win_amd64.whl", hash = "sha256:531f8b46f3d3db91d9ef285191825d108090856b3bc86a75b7c3930f16ce432f"}, + {file = "grpcio-1.50.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:d534d169673dd5e6e12fb57cc67664c2641361e1a0885545495e65a7b761b0f4"}, + {file = "grpcio-1.50.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:1d8d02dbb616c0a9260ce587eb751c9c7dc689bc39efa6a88cc4fa3e9c138a7b"}, + {file = "grpcio-1.50.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:baab51dcc4f2aecabf4ed1e2f57bceab240987c8b03533f1cef90890e6502067"}, + {file = "grpcio-1.50.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40838061e24f960b853d7bce85086c8e1b81c6342b1f4c47ff0edd44bbae2722"}, + {file = "grpcio-1.50.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:931e746d0f75b2a5cff0a1197d21827a3a2f400c06bace036762110f19d3d507"}, + {file = "grpcio-1.50.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:15f9e6d7f564e8f0776770e6ef32dac172c6f9960c478616c366862933fa08b4"}, + {file = "grpcio-1.50.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a4c23e54f58e016761b576976da6a34d876420b993f45f66a2bfb00363ecc1f9"}, + {file = "grpcio-1.50.0-cp38-cp38-win32.whl", hash = "sha256:3e4244c09cc1b65c286d709658c061f12c61c814be0b7030a2d9966ff02611e0"}, + {file = "grpcio-1.50.0-cp38-cp38-win_amd64.whl", hash = "sha256:8e69aa4e9b7f065f01d3fdcecbe0397895a772d99954bb82eefbb1682d274518"}, + {file = "grpcio-1.50.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:af98d49e56605a2912cf330b4627e5286243242706c3a9fa0bcec6e6f68646fc"}, + {file = "grpcio-1.50.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:080b66253f29e1646ac53ef288c12944b131a2829488ac3bac8f52abb4413c0d"}, + {file 
= "grpcio-1.50.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:ab5d0e3590f0a16cb88de4a3fa78d10eb66a84ca80901eb2c17c1d2c308c230f"}, + {file = "grpcio-1.50.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb11464f480e6103c59d558a3875bd84eed6723f0921290325ebe97262ae1347"}, + {file = "grpcio-1.50.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e07fe0d7ae395897981d16be61f0db9791f482f03fee7d1851fe20ddb4f69c03"}, + {file = "grpcio-1.50.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d75061367a69808ab2e84c960e9dce54749bcc1e44ad3f85deee3a6c75b4ede9"}, + {file = "grpcio-1.50.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ae23daa7eda93c1c49a9ecc316e027ceb99adbad750fbd3a56fa9e4a2ffd5ae0"}, + {file = "grpcio-1.50.0-cp39-cp39-win32.whl", hash = "sha256:177afaa7dba3ab5bfc211a71b90da1b887d441df33732e94e26860b3321434d9"}, + {file = "grpcio-1.50.0-cp39-cp39-win_amd64.whl", hash = "sha256:ea8ccf95e4c7e20419b7827aa5b6da6f02720270686ac63bd3493a651830235c"}, + {file = "grpcio-1.50.0.tar.gz", hash = "sha256:12b479839a5e753580b5e6053571de14006157f2ef9b71f38c56dc9b23b95ad6"}, ] grpcio-tools = [ - {file = "grpcio-tools-1.43.0.tar.gz", hash = "sha256:f42f1d713096808b1b0472dd2a3749b712d13f0092dab9442d9c096446e860b2"}, - {file = "grpcio_tools-1.43.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:766771ef5b60ebcba0a3bdb302dd92fda988552eb8508451ff6d97371eac38e5"}, - {file = "grpcio_tools-1.43.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:178a881db5de0f89abf3aeeb260ecfd1116cc31f88fb600a45fb5b19c3323b33"}, - {file = "grpcio_tools-1.43.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:019f55929e963214471825c7a4cdab7a57069109d5621b24e4db7b428b5fe47d"}, - {file = "grpcio_tools-1.43.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6c0e1d1b47554c580882d392b739df91a55b6a8ec696b2b2e1bbc127d63df2c"}, - {file = "grpcio_tools-1.43.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c5c80098fa69593b828d119973744de03c3f9a6935df8a02e4329a39b7072f5"}, - {file = "grpcio_tools-1.43.0-cp310-cp310-win32.whl", hash = "sha256:53f7dcaa4218df1b64b39d0fc7236a8270e8ab2db4ab8cd1d2fda0e6d4544946"}, - {file = "grpcio_tools-1.43.0-cp310-cp310-win_amd64.whl", hash = "sha256:5be6d402b0cafef20ba3abb3baa37444961d9a9c4a6434d3d7c1f082f7697deb"}, - {file = "grpcio_tools-1.43.0-cp36-cp36m-linux_armv7l.whl", hash = "sha256:8953fdebef6905d7ff13a5a376b21b6fecd808d18bf4f0d3990ffe4a215d56eb"}, - {file = "grpcio_tools-1.43.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:18870dcc8369ac4c37213e6796d8dc20494ea770670204f5e573f88e69eaaf0b"}, - {file = "grpcio_tools-1.43.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:010a4be6a2fccbd6741a4809c5da7f2e39a1e9e227745e6b495be567638bbeb9"}, - {file = "grpcio_tools-1.43.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:426f16b6b14d533ce61249a18fbcd1a23a4fa0c71a6d7ab347b1c7f862847bb8"}, - {file = "grpcio_tools-1.43.0-cp36-cp36m-manylinux_2_17_aarch64.whl", hash = "sha256:f974cb0bea88bac892c3ed16da92c6ac88cff0fea17f24bf0e1892eb4d27cd00"}, - {file = "grpcio_tools-1.43.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:55c2e604536e06248e2f81e549737fb3a180c8117832e494a0a8a81fbde44837"}, - {file = "grpcio_tools-1.43.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f97f9ffa49348fb24692751d2d4455ef2968bd07fe536d65597caaec14222629"}, - {file = "grpcio_tools-1.43.0-cp36-cp36m-win32.whl", hash = 
"sha256:6eaf97414237b8670ae9fa623879a26eabcc4c635b550c79a81e17eb600d6ae3"}, - {file = "grpcio_tools-1.43.0-cp36-cp36m-win_amd64.whl", hash = "sha256:04f100c1f6a7c72c537760c33582f6970070bd6fa6676b529bccfa31cc58bc79"}, - {file = "grpcio_tools-1.43.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:9dbb6d1f58f26d88ae689f1b49de84cfaf4786c81c01b9001d3ceea178116a07"}, - {file = "grpcio_tools-1.43.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:63862a441a77f6326ea9fe4bb005882f0e363441a5968d9cf8621c34d3dadc2b"}, - {file = "grpcio_tools-1.43.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:6dea0cb2e79b67593553ed8662f70e4310599fa8850fc0e056b19fcb63572b7f"}, - {file = "grpcio_tools-1.43.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:3eb4aa5b0e578c3d9d9da8e37a2ef73654287a498b8081543acd0db7f0ec1a9c"}, - {file = "grpcio_tools-1.43.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:09464c6b17663088144b7e6ea10e9465efdcee03d4b2ffefab39a799bd8360f8"}, - {file = "grpcio_tools-1.43.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2458d6b0404f83d95aef00cec01f310d30e9719564a25be50e39b259f6a2da5d"}, - {file = "grpcio_tools-1.43.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e9bb5da437364b7dcd2d3c6850747081ecbec0ba645c96c6d471f7e21fdcadb"}, - {file = "grpcio_tools-1.43.0-cp37-cp37m-win32.whl", hash = "sha256:2737f749a6ab965748629e619b35f3e1cbe5820fc79e34c88f73cb99efc71dde"}, - {file = "grpcio_tools-1.43.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c39cbe7b902bb92f9afaa035091f5e2b8be35acbac501fec8cb6a0be7d7cdbbd"}, - {file = "grpcio_tools-1.43.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:05550ba473cff7c09e905fcfb2263fd1f7600389660194ec022b5d5a3802534b"}, - {file = "grpcio_tools-1.43.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:ce13a922db8f5f95c5041d3a4cbf04d942b353f0cba9b251a674f69a31a2d3a6"}, - {file = "grpcio_tools-1.43.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:f19d40690c97365c1c1bde81474e6f496d7ab76f87e6d2889c72ad01bac98f2d"}, - {file = "grpcio_tools-1.43.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba3da574eb08fcaed541b3fc97ce217360fd86d954fa9ad6a604803d57a2e049"}, - {file = "grpcio_tools-1.43.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:efd1eb5880001f5189cfa3a774675cc9bbc8cc51586a3e90fe796394ac8626b8"}, - {file = "grpcio_tools-1.43.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:234c7a5af653357df5c616e013173eddda6193146c8ab38f3108c4784f66be26"}, - {file = "grpcio_tools-1.43.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7e3662f62d410b3f81823b5fa0f79c6e0e250977a1058e4131867b85138a661"}, - {file = "grpcio_tools-1.43.0-cp38-cp38-win32.whl", hash = "sha256:5f2e584d7644ef924e9e042fa151a3bb9f7c28ef1ae260ee6c9cb327982b5e94"}, - {file = "grpcio_tools-1.43.0-cp38-cp38-win_amd64.whl", hash = "sha256:98dcb5b756855110fb661ccd6a93a716610b7efcd5720a3aec01358a1a892c30"}, - {file = "grpcio_tools-1.43.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:61ef6cb6ccf9b9c27bb85fffc5338194bcf444df502196c2ad0ff8df4706d41e"}, - {file = "grpcio_tools-1.43.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:1def9b68ac9e62674929bc6590a33d89635f1cf16016657d9e16a69f41aa5c36"}, - {file = "grpcio_tools-1.43.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:b68cc0c95a0f8c757e8d69b5fa46111d5c9d887ae62af28f827649b1d1b70fe1"}, - {file = "grpcio_tools-1.43.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:e956b5c3b586d7b27eae49fb06f544a26288596fe12e22ffec768109717276d1"}, - {file 
= "grpcio_tools-1.43.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:671e61bbc91d8d568f12c3654bb5a91fce9f3fdfd5ec2cfc60c2d3a840449aa6"}, - {file = "grpcio_tools-1.43.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7173ed19854d1066bce9bdc09f735ca9c13e74a25d47a1cc5d1fe803b53bffb"}, - {file = "grpcio_tools-1.43.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1adb0dbcc1c10b86dcda910b8f56e39210e401bcee923dba166ba923a5f4696a"}, - {file = "grpcio_tools-1.43.0-cp39-cp39-win32.whl", hash = "sha256:ebfb94ddb454a6dc3a505d9531dc81c948e6364e181b8795bfad3f3f479974dc"}, - {file = "grpcio_tools-1.43.0-cp39-cp39-win_amd64.whl", hash = "sha256:d21928b680e6e29538688cffbf53f3d5a53cff0ec8f0c33139641700045bdf1a"}, + {file = "grpcio-tools-1.48.2.tar.gz", hash = "sha256:8902a035708555cddbd61b5467cea127484362decc52de03f061a1a520fe90cd"}, + {file = "grpcio_tools-1.48.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:92acc3e10ba2b0dcb90a88ae9fe1cc0ffba6868545207e4ff20ca95284f8e3c9"}, + {file = "grpcio_tools-1.48.2-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:e5bb396d63495667d4df42e506eed9d74fc9a51c99c173c04395fe7604c848f1"}, + {file = "grpcio_tools-1.48.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:84a84d601a238572d049d3108e04fe4c206536e81076d56e623bd525a1b38def"}, + {file = "grpcio_tools-1.48.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70564521e86a0de35ea9ac6daecff10cb46860aec469af65869974807ce8e98b"}, + {file = "grpcio_tools-1.48.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdbbe63f6190187de5946891941629912ac8196701ed2253fa91624a397822ec"}, + {file = "grpcio_tools-1.48.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ae56f133b05b7e5d780ef7e032dd762adad7f3dc8f64adb43ff5bfabd659f435"}, + {file = "grpcio_tools-1.48.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f0feb4f2b777fa6377e977faa89c26359d4f31953de15e035505b92f41aa6906"}, + {file = "grpcio_tools-1.48.2-cp310-cp310-win32.whl", hash = "sha256:80f450272316ca0924545f488c8492649ca3aeb7044d4bf59c426dcdee527f7c"}, + {file = "grpcio_tools-1.48.2-cp310-cp310-win_amd64.whl", hash = "sha256:21ff50e321736eba22210bf9b94e05391a9ac345f26e7df16333dc75d63e74fb"}, + {file = "grpcio_tools-1.48.2-cp36-cp36m-linux_armv7l.whl", hash = "sha256:d598ccde6338b2cfbb3124f34c95f03394209013f9b1ed4a5360a736853b1c27"}, + {file = "grpcio_tools-1.48.2-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:a43d26714933f23de93ea0bf9c86c66a6ede709b8ca32e357f9e2181703e64ae"}, + {file = "grpcio_tools-1.48.2-cp36-cp36m-manylinux_2_17_aarch64.whl", hash = "sha256:55fdebc73fb580717656b1bafa4f8eca448726a7aa22726a6c0a7895d2f0f088"}, + {file = "grpcio_tools-1.48.2-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8588819b22d0de3aa1951e1991cc3e4b9aa105eecf6e3e24eb0a2fc8ab958b3e"}, + {file = "grpcio_tools-1.48.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9771d4d317dca029dfaca7ec9282d8afe731c18bc536ece37fd39b8a974cc331"}, + {file = "grpcio_tools-1.48.2-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d886a9e052a038642b3af5d18e6f2085d1656d9788e202dc23258cf3a751e7ca"}, + {file = "grpcio_tools-1.48.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d77e8b1613876e0d8fd17709509d4ceba13492816426bd156f7e88a4c47e7158"}, + {file = "grpcio_tools-1.48.2-cp36-cp36m-win32.whl", hash = "sha256:dcaaecdd5e847de5c1d533ea91522bf56c9e6b2dc98cdc0d45f0a1c26e846ea2"}, + {file = 
"grpcio_tools-1.48.2-cp36-cp36m-win_amd64.whl", hash = "sha256:0119aabd9ceedfdf41b56b9fdc8284dd85a7f589d087f2694d743f346a368556"}, + {file = "grpcio_tools-1.48.2-cp37-cp37m-linux_armv7l.whl", hash = "sha256:189be2a9b672300ca6845d94016bdacc052fdbe9d1ae9e85344425efae2ff8ef"}, + {file = "grpcio_tools-1.48.2-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:9443f5c30bac449237c3cf99da125f8d6e6c01e17972bc683ee73b75dea95573"}, + {file = "grpcio_tools-1.48.2-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:e0403e095b343431195db1305248b50019ad55d3dd310254431af87e14ef83a2"}, + {file = "grpcio_tools-1.48.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5410d6b601d1404835e34466bd8aee37213489b36ee1aad2276366e265ff29d4"}, + {file = "grpcio_tools-1.48.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51be91b7c7056ff9ee48b1eccd4a2840b0126230803a5e09dfc082a5b16a91c1"}, + {file = "grpcio_tools-1.48.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:516eedd5eb7af6326050bc2cfceb3a977b9cc1144f283c43cc4956905285c912"}, + {file = "grpcio_tools-1.48.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d18599ab572b2f15a8f3db49503272d1bb4fcabb4b4d1214ef03aca1816b20a0"}, + {file = "grpcio_tools-1.48.2-cp37-cp37m-win32.whl", hash = "sha256:d18ef2adc05a8ef9e58ac46357f6d4ce7e43e077c7eda0a4425773461f9d0e6e"}, + {file = "grpcio_tools-1.48.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6d9753944e5a6b6b78b76ce9d2ae0fe3f748008c1849deb7fadcb64489d6553b"}, + {file = "grpcio_tools-1.48.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:3c8749dca04a8d302862ceeb1dfbdd071ee13b281395975f24405a347e5baa57"}, + {file = "grpcio_tools-1.48.2-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:7307dd2408b82ea545ae63502ec03036b025f449568556ea9a056e06129a7a4e"}, + {file = "grpcio_tools-1.48.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:072234859f6069dc43a6be8ad6b7d682f4ba1dc2e2db2ebf5c75f62eee0f6dfb"}, + {file = "grpcio_tools-1.48.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6cc298fbfe584de8876a85355efbcf796dfbcfac5948c9560f5df82e79336e2a"}, + {file = "grpcio_tools-1.48.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f75973a42c710999acd419968bc79f00327e03e855bbe82c6529e003e49af660"}, + {file = "grpcio_tools-1.48.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f766050e491d0b3203b6b85638015f543816a2eb7d089fc04e86e00f6de0e31d"}, + {file = "grpcio_tools-1.48.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8e0d74403484eb77e8df2566a64b8b0b484b5c87903678c381634dd72f252d5e"}, + {file = "grpcio_tools-1.48.2-cp38-cp38-win32.whl", hash = "sha256:cb75bac0cd43858cb759ef103fe68f8c540cb58b63dda127e710228fec3007b8"}, + {file = "grpcio_tools-1.48.2-cp38-cp38-win_amd64.whl", hash = "sha256:cabc8b0905cedbc3b2b7b2856334fa35cce3d4bc79ae241cacd8cca8940a5c85"}, + {file = "grpcio_tools-1.48.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:e712a6d00606ad19abdeae852a7e521d6f6d0dcea843708fecf3a38be16a851e"}, + {file = "grpcio_tools-1.48.2-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:e7e7668f89fd598c5469bb58e16bfd12b511d9947ccc75aec94da31f62bc3758"}, + {file = "grpcio_tools-1.48.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:a415fbec67d4ff7efe88794cbe00cf548d0f0a5484cceffe0a0c89d47694c491"}, + {file = "grpcio_tools-1.48.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d96e96ae7361aa51c9cd9c73b677b51f691f98df6086860fcc3c45852d96b0b0"}, + {file = 
"grpcio_tools-1.48.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e20d7885a40e68a2bda92908acbabcdf3c14dd386c3845de73ba139e9df1f132"}, + {file = "grpcio_tools-1.48.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8a5614251c46da07549e24f417cf989710250385e9d80deeafc53a0ee7df6325"}, + {file = "grpcio_tools-1.48.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ace0035766fe01a1b096aa050be9f0a9f98402317e7aeff8bfe55349be32a407"}, + {file = "grpcio_tools-1.48.2-cp39-cp39-win32.whl", hash = "sha256:4fa4300b1be59b046492ed3c5fdb59760bc6433f44c08f50de900f9552ec7461"}, + {file = "grpcio_tools-1.48.2-cp39-cp39-win_amd64.whl", hash = "sha256:0fb6c1c1e56eb26b224adc028a4204b6ad0f8b292efa28067dff273bbc8b27c4"}, ] grpclib = [ - {file = "grpclib-0.4.2.tar.gz", hash = "sha256:ead080cb7d56d6a5e835aaf5255d1ef1dce475a7722566ea225f0188fce33b68"}, + {file = "grpclib-0.4.3.tar.gz", hash = "sha256:eadf2002fc5a25158b707c0338a6c0b96dd7fbdc6df66f7e515e7f041d56a940"}, ] h2 = [ {file = "h2-4.1.0-py3-none-any.whl", hash = "sha256:03a46bcf682256c95b5fd9e9a99c1323584c3eec6440d379b9903d709476bc6d"}, @@ -619,56 +651,66 @@ hyperframe = [ {file = "hyperframe-6.0.1.tar.gz", hash = "sha256:ae510046231dc8e9ecb1a6586f63d2347bf4c8905914aa84ba585ae85f28a914"}, ] idna = [ - {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"}, - {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"}, + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, ] importlib-metadata = [ - {file = "importlib_metadata-4.10.1-py3-none-any.whl", hash = "sha256:899e2a40a8c4a1aec681feef45733de8a6c58f3f6a0dbed2eb6574b4387a77b6"}, - {file = "importlib_metadata-4.10.1.tar.gz", hash = "sha256:951f0d8a5b7260e9db5e41d429285b5f451e928479f19d80818878527d36e95e"}, + {file = "importlib_metadata-5.0.0-py3-none-any.whl", hash = "sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43"}, + {file = "importlib_metadata-5.0.0.tar.gz", hash = "sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab"}, ] iniconfig = [ {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, ] -jinja2 = [ - {file = "Jinja2-2.11.3-py2.py3-none-any.whl", hash = "sha256:03e47ad063331dd6a3f04a43eddca8a966a26ba0c5b7207a9a9e4e08f1b29419"}, - {file = "Jinja2-2.11.3.tar.gz", hash = "sha256:a6d58433de0ae800347cab1fa3043cebbabe8baa9d29e668f1c768cb87a333c6"}, -] -markupsafe = [ - {file = "MarkupSafe-2.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = 
"sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-win32.whl", hash = "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-win32.whl", hash = "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135"}, - {file = 
"MarkupSafe-2.0.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-win32.whl", hash = "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8"}, - {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"}, +isort = [ + {file = "isort-5.10.1-py3-none-any.whl", hash = "sha256:6f62d78e2f89b4500b080fe3a81690850cd254227f27f75c3a0c491a1f351ba7"}, + {file = "isort-5.10.1.tar.gz", hash = "sha256:e8443a5e7a020e9d7f97f1d7d9cd17c88bcb3bc7e218bf9cf5095fe550be2951"}, +] +Jinja2 = [ + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, +] +MarkupSafe = [ + {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-win32.whl", hash = "sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6"}, + {file = "MarkupSafe-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = 
"sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-win32.whl", hash = "sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff"}, + {file = "MarkupSafe-2.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-win32.whl", hash = "sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1"}, + {file = "MarkupSafe-2.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-win32.whl", hash = 
"sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c"}, + {file = "MarkupSafe-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247"}, + {file = "MarkupSafe-2.1.1.tar.gz", hash = "sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b"}, ] multidict = [ {file = "multidict-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b9e95a740109c6047602f4db4da9949e6c5945cefbad34a1299775ddc9a62e2"}, @@ -740,52 +782,48 @@ packaging = [ {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, ] pathspec = [ - {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"}, - {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"}, + {file = "pathspec-0.10.1-py3-none-any.whl", hash = "sha256:46846318467efc4556ccfd27816e004270a9eeeeb4d062ce5e6fc7a87c573f93"}, + {file = "pathspec-0.10.1.tar.gz", hash = "sha256:7ace6161b621d31e7902eb6b5ae148d12cfd23f4a249b9ffb6b9fee12084323d"}, ] platformdirs = [ - {file = "platformdirs-2.4.1-py3-none-any.whl", hash = "sha256:1d7385c7db91728b83efd0ca99a5afb296cab9d0ed8313a45ed8ba17967ecfca"}, - {file = "platformdirs-2.4.1.tar.gz", hash = "sha256:440633ddfebcc36264232365d7840a970e75e1018d15b4327d11f91909045fda"}, + {file = "platformdirs-2.5.3-py3-none-any.whl", hash = "sha256:0cb405749187a194f444c25c82ef7225232f11564721eabffc6ec70df83b11cb"}, + {file = "platformdirs-2.5.3.tar.gz", hash = "sha256:6e52c21afff35cb659c6e52d8b4d61b9bd544557180440538f255d9382c8cbe0"}, ] pluggy = [ {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, ] protobuf = [ - {file = "protobuf-3.19.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f51d5a9f137f7a2cec2d326a74b6e3fc79d635d69ffe1b036d39fc7d75430d37"}, - {file = "protobuf-3.19.4-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:09297b7972da685ce269ec52af761743714996b4381c085205914c41fcab59fb"}, - {file = "protobuf-3.19.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:072fbc78d705d3edc7ccac58a62c4c8e0cec856987da7df8aca86e647be4e35c"}, - {file = "protobuf-3.19.4-cp310-cp310-win32.whl", hash = "sha256:7bb03bc2873a2842e5ebb4801f5c7ff1bfbdf426f85d0172f7644fcda0671ae0"}, - {file = "protobuf-3.19.4-cp310-cp310-win_amd64.whl", hash = "sha256:f358aa33e03b7a84e0d91270a4d4d8f5df6921abe99a377828839e8ed0c04e07"}, - {file = "protobuf-3.19.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:1c91ef4110fdd2c590effb5dca8fdbdcb3bf563eece99287019c4204f53d81a4"}, - {file = "protobuf-3.19.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c438268eebb8cf039552897d78f402d734a404f1360592fef55297285f7f953f"}, - {file = "protobuf-3.19.4-cp36-cp36m-win32.whl", hash = "sha256:835a9c949dc193953c319603b2961c5c8f4327957fe23d914ca80d982665e8ee"}, - {file = "protobuf-3.19.4-cp36-cp36m-win_amd64.whl", hash = "sha256:4276cdec4447bd5015453e41bdc0c0c1234eda08420b7c9a18b8d647add51e4b"}, - {file = "protobuf-3.19.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6cbc312be5e71869d9d5ea25147cdf652a6781cf4d906497ca7690b7b9b5df13"}, - {file = "protobuf-3.19.4-cp37-cp37m-manylinux2014_aarch64.whl", hash = 
"sha256:54a1473077f3b616779ce31f477351a45b4fef8c9fd7892d6d87e287a38df368"}, - {file = "protobuf-3.19.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:435bb78b37fc386f9275a7035fe4fb1364484e38980d0dd91bc834a02c5ec909"}, - {file = "protobuf-3.19.4-cp37-cp37m-win32.whl", hash = "sha256:16f519de1313f1b7139ad70772e7db515b1420d208cb16c6d7858ea989fc64a9"}, - {file = "protobuf-3.19.4-cp37-cp37m-win_amd64.whl", hash = "sha256:cdc076c03381f5c1d9bb1abdcc5503d9ca8b53cf0a9d31a9f6754ec9e6c8af0f"}, - {file = "protobuf-3.19.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:69da7d39e39942bd52848438462674c463e23963a1fdaa84d88df7fbd7e749b2"}, - {file = "protobuf-3.19.4-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:48ed3877fa43e22bcacc852ca76d4775741f9709dd9575881a373bd3e85e54b2"}, - {file = "protobuf-3.19.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd95d1dfb9c4f4563e6093a9aa19d9c186bf98fa54da5252531cc0d3a07977e7"}, - {file = "protobuf-3.19.4-cp38-cp38-win32.whl", hash = "sha256:b38057450a0c566cbd04890a40edf916db890f2818e8682221611d78dc32ae26"}, - {file = "protobuf-3.19.4-cp38-cp38-win_amd64.whl", hash = "sha256:7ca7da9c339ca8890d66958f5462beabd611eca6c958691a8fe6eccbd1eb0c6e"}, - {file = "protobuf-3.19.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:36cecbabbda242915529b8ff364f2263cd4de7c46bbe361418b5ed859677ba58"}, - {file = "protobuf-3.19.4-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:c1068287025f8ea025103e37d62ffd63fec8e9e636246b89c341aeda8a67c934"}, - {file = "protobuf-3.19.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96bd766831596d6014ca88d86dc8fe0fb2e428c0b02432fd9db3943202bf8c5e"}, - {file = "protobuf-3.19.4-cp39-cp39-win32.whl", hash = "sha256:84123274d982b9e248a143dadd1b9815049f4477dc783bf84efe6250eb4b836a"}, - {file = "protobuf-3.19.4-cp39-cp39-win_amd64.whl", hash = "sha256:3112b58aac3bac9c8be2b60a9daf6b558ca3f7681c130dcdd788ade7c9ffbdca"}, - {file = "protobuf-3.19.4-py2.py3-none-any.whl", hash = "sha256:8961c3a78ebfcd000920c9060a262f082f29838682b1f7201889300c1fbe0616"}, - {file = "protobuf-3.19.4.tar.gz", hash = "sha256:9df0c10adf3e83015ced42a9a7bd64e13d06c4cf45c340d2c63020ea04499d0a"}, + {file = "protobuf-3.20.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99"}, + {file = "protobuf-3.20.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e"}, + {file = "protobuf-3.20.3-cp310-cp310-win32.whl", hash = "sha256:28545383d61f55b57cf4df63eebd9827754fd2dc25f80c5253f9184235db242c"}, + {file = "protobuf-3.20.3-cp310-cp310-win_amd64.whl", hash = "sha256:67a3598f0a2dcbc58d02dd1928544e7d88f764b47d4a286202913f0b2801c2e7"}, + {file = "protobuf-3.20.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:899dc660cd599d7352d6f10d83c95df430a38b410c1b66b407a6b29265d66469"}, + {file = "protobuf-3.20.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64857f395505ebf3d2569935506ae0dfc4a15cb80dc25261176c784662cdcc4"}, + {file = "protobuf-3.20.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:d9e4432ff660d67d775c66ac42a67cf2453c27cb4d738fc22cb53b5d84c135d4"}, + {file = "protobuf-3.20.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:74480f79a023f90dc6e18febbf7b8bac7508420f2006fabd512013c0c238f454"}, + {file = "protobuf-3.20.3-cp37-cp37m-win32.whl", hash = 
"sha256:b6cc7ba72a8850621bfec987cb72623e703b7fe2b9127a161ce61e61558ad905"}, + {file = "protobuf-3.20.3-cp37-cp37m-win_amd64.whl", hash = "sha256:8c0c984a1b8fef4086329ff8dd19ac77576b384079247c770f29cc8ce3afa06c"}, + {file = "protobuf-3.20.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:de78575669dddf6099a8a0f46a27e82a1783c557ccc38ee620ed8cc96d3be7d7"}, + {file = "protobuf-3.20.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:f4c42102bc82a51108e449cbb32b19b180022941c727bac0cfd50170341f16ee"}, + {file = "protobuf-3.20.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:44246bab5dd4b7fbd3c0c80b6f16686808fab0e4aca819ade6e8d294a29c7050"}, + {file = "protobuf-3.20.3-cp38-cp38-win32.whl", hash = "sha256:c02ce36ec760252242a33967d51c289fd0e1c0e6e5cc9397e2279177716add86"}, + {file = "protobuf-3.20.3-cp38-cp38-win_amd64.whl", hash = "sha256:447d43819997825d4e71bf5769d869b968ce96848b6479397e29fc24c4a5dfe9"}, + {file = "protobuf-3.20.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:398a9e0c3eaceb34ec1aee71894ca3299605fa8e761544934378bbc6c97de23b"}, + {file = "protobuf-3.20.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bf01b5720be110540be4286e791db73f84a2b721072a3711efff6c324cdf074b"}, + {file = "protobuf-3.20.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:daa564862dd0d39c00f8086f88700fdbe8bc717e993a21e90711acfed02f2402"}, + {file = "protobuf-3.20.3-cp39-cp39-win32.whl", hash = "sha256:819559cafa1a373b7096a482b504ae8a857c89593cf3a25af743ac9ecbd23480"}, + {file = "protobuf-3.20.3-cp39-cp39-win_amd64.whl", hash = "sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7"}, + {file = "protobuf-3.20.3-py2.py3-none-any.whl", hash = "sha256:a7ca6d488aa8ff7f329d4c545b2dbad8ac31464f1d8b1c87ad1346717731e4db"}, + {file = "protobuf-3.20.3.tar.gz", hash = "sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2"}, ] py = [ {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, ] pyparsing = [ - {file = "pyparsing-3.0.7-py3-none-any.whl", hash = "sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484"}, - {file = "pyparsing-3.0.7.tar.gz", hash = "sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea"}, + {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, + {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, ] pytest = [ {file = "pytest-6.2.5-py3-none-any.whl", hash = "sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134"}, @@ -800,8 +838,12 @@ python-dateutil = [ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, ] requests = [ - {file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"}, - {file = "requests-2.27.1.tar.gz", hash = "sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"}, + {file = "requests-2.28.1-py3-none-any.whl", hash = "sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349"}, + {file = "requests-2.28.1.tar.gz", hash = "sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983"}, +] +setuptools = [ + {file = 
"setuptools-65.5.1-py3-none-any.whl", hash = "sha256:d0b9a8433464d5800cbe05094acf5c6d52a91bfac9b52bcfc4d41382be5d5d31"}, + {file = "setuptools-65.5.1.tar.gz", hash = "sha256:e197a19aa8ec9722928f2206f8de752def0e4c9fc6953527360d1c36d94ddb2f"}, ] six = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, @@ -820,40 +862,40 @@ tomli = [ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] typed-ast = [ - {file = "typed_ast-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:183b183b7771a508395d2cbffd6db67d6ad52958a5fdc99f450d954003900266"}, - {file = "typed_ast-1.5.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:676d051b1da67a852c0447621fdd11c4e104827417bf216092ec3e286f7da596"}, - {file = "typed_ast-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc2542e83ac8399752bc16e0b35e038bdb659ba237f4222616b4e83fb9654985"}, - {file = "typed_ast-1.5.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74cac86cc586db8dfda0ce65d8bcd2bf17b58668dfcc3652762f3ef0e6677e76"}, - {file = "typed_ast-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:18fe320f354d6f9ad3147859b6e16649a0781425268c4dde596093177660e71a"}, - {file = "typed_ast-1.5.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:31d8c6b2df19a777bc8826770b872a45a1f30cfefcfd729491baa5237faae837"}, - {file = "typed_ast-1.5.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:963a0ccc9a4188524e6e6d39b12c9ca24cc2d45a71cfdd04a26d883c922b4b78"}, - {file = "typed_ast-1.5.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0eb77764ea470f14fcbb89d51bc6bbf5e7623446ac4ed06cbd9ca9495b62e36e"}, - {file = "typed_ast-1.5.2-cp36-cp36m-win_amd64.whl", hash = "sha256:294a6903a4d087db805a7656989f613371915fc45c8cc0ddc5c5a0a8ad9bea4d"}, - {file = "typed_ast-1.5.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:26a432dc219c6b6f38be20a958cbe1abffcc5492821d7e27f08606ef99e0dffd"}, - {file = "typed_ast-1.5.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7407cfcad702f0b6c0e0f3e7ab876cd1d2c13b14ce770e412c0c4b9728a0f88"}, - {file = "typed_ast-1.5.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f30ddd110634c2d7534b2d4e0e22967e88366b0d356b24de87419cc4410c41b7"}, - {file = "typed_ast-1.5.2-cp37-cp37m-win_amd64.whl", hash = "sha256:8c08d6625bb258179b6e512f55ad20f9dfef019bbfbe3095247401e053a3ea30"}, - {file = "typed_ast-1.5.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:90904d889ab8e81a956f2c0935a523cc4e077c7847a836abee832f868d5c26a4"}, - {file = "typed_ast-1.5.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bbebc31bf11762b63bf61aaae232becb41c5bf6b3461b80a4df7e791fabb3aca"}, - {file = "typed_ast-1.5.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29dd9a3a9d259c9fa19d19738d021632d673f6ed9b35a739f48e5f807f264fb"}, - {file = "typed_ast-1.5.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:58ae097a325e9bb7a684572d20eb3e1809802c5c9ec7108e85da1eb6c1a3331b"}, - {file = "typed_ast-1.5.2-cp38-cp38-win_amd64.whl", hash = "sha256:da0a98d458010bf4fe535f2d1e367a2e2060e105978873c04c04212fb20543f7"}, - {file = "typed_ast-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:33b4a19ddc9fc551ebabca9765d54d04600c4a50eda13893dadf67ed81d9a098"}, - {file = "typed_ast-1.5.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1098df9a0592dd4c8c0ccfc2e98931278a6c6c53cb3a3e2cf7e9ee3b06153344"}, - {file = "typed_ast-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42c47c3b43fe3a39ddf8de1d40dbbfca60ac8530a36c9b198ea5b9efac75c09e"}, - {file = "typed_ast-1.5.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f290617f74a610849bd8f5514e34ae3d09eafd521dceaa6cf68b3f4414266d4e"}, - {file = "typed_ast-1.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:df05aa5b241e2e8045f5f4367a9f6187b09c4cdf8578bb219861c4e27c443db5"}, - {file = "typed_ast-1.5.2.tar.gz", hash = "sha256:525a2d4088e70a9f75b08b3f87a51acc9cde640e19cc523c7e41aa355564ae27"}, + {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"}, + {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"}, + {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"}, + {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"}, + {file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"}, + {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"}, + {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"}, + {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"}, + {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"}, + {file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"}, + {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"}, + {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"}, + {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"}, + {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"}, + {file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"}, + {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"}, + {file = 
"typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"}, + {file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"}, + {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"}, + {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"}, + {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"}, + {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"}, + {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"}, + {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"}, ] typing-extensions = [ - {file = "typing_extensions-4.0.1-py3-none-any.whl", hash = "sha256:7f001e5ac290a0c0401508864c7ec868be4e701886d5b573a9528ed3973d9d3b"}, - {file = "typing_extensions-4.0.1.tar.gz", hash = "sha256:4ca091dea149f945ec56afb48dae714f21e8692ef22a395223bcd328961b6a0e"}, + {file = "typing_extensions-4.4.0-py3-none-any.whl", hash = "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"}, + {file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"}, ] urllib3 = [ - {file = "urllib3-1.26.8-py2.py3-none-any.whl", hash = "sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed"}, - {file = "urllib3-1.26.8.tar.gz", hash = "sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c"}, + {file = "urllib3-1.26.12-py2.py3-none-any.whl", hash = "sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997"}, + {file = "urllib3-1.26.12.tar.gz", hash = "sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e"}, ] zipp = [ - {file = "zipp-3.7.0-py3-none-any.whl", hash = "sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"}, - {file = "zipp-3.7.0.tar.gz", hash = "sha256:9f50f446828eb9d45b267433fd3e9da8d801f614129124863f9c51ebceafb87d"}, + {file = "zipp-3.10.0-py3-none-any.whl", hash = "sha256:4fcb6f278987a6605757302a6e40e896257570d11c51628968ccb2a47e80c6c1"}, + {file = "zipp-3.10.0.tar.gz", hash = "sha256:7a7262fd930bd3e36c50b9a64897aec3fafff3dfdeec9623ae22b40e93f99bb8"}, ] diff --git a/pyproject.toml b/pyproject.toml index 93779d1..643a457 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "envoy_data_plane" -version = "0.3.0" +version = "0.4.0" description = "Python dataclasses for the Envoy Data-Plane-API" authors = ["Vasili Syrakis "] license = "MIT" @@ -22,7 +22,7 @@ pytest = "^6.2.2" pytest-spec = "^3.1.0" grpcio = "^1.36.1" grpcio-tools = "^1.36.1" -betterproto = {version = "^2.0.0b3", extras = ["compiler"]} +betterproto = {version = "^2.0.0b5", extras = ["compiler"]} [build-system] requires = ["poetry-core>=1.0.0"] diff --git a/src/envoy_data_plane/__init__.py b/src/envoy_data_plane/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git 
a/src/envoy_data_plane/envoy/__init__.py b/src/envoy_data_plane/envoy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/admin/__init__.py b/src/envoy_data_plane/envoy/admin/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/admin/v2alpha/__init__.py b/src/envoy_data_plane/envoy/admin/v2alpha/__init__.py deleted file mode 100644 index 07f916e..0000000 --- a/src/envoy_data_plane/envoy/admin/v2alpha/__init__.py +++ /dev/null @@ -1,750 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/admin/v2alpha/certs.proto, envoy/admin/v2alpha/clusters.proto, envoy/admin/v2alpha/config_dump.proto, envoy/admin/v2alpha/listeners.proto, envoy/admin/v2alpha/memory.proto, envoy/admin/v2alpha/metrics.proto, envoy/admin/v2alpha/mutex_stats.proto, envoy/admin/v2alpha/server_info.proto, envoy/admin/v2alpha/tap.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import datetime, timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class SimpleMetricType(betterproto.Enum): - COUNTER = 0 - GAUGE = 1 - - -class ServerInfoState(betterproto.Enum): - LIVE = 0 - DRAINING = 1 - PRE_INITIALIZING = 2 - INITIALIZING = 3 - - -class CommandLineOptionsIpVersion(betterproto.Enum): - v4 = 0 - v6 = 1 - - -class CommandLineOptionsMode(betterproto.Enum): - Serve = 0 - Validate = 1 - InitOnly = 2 - - -@dataclass(eq=False, repr=False) -class MutexStats(betterproto.Message): - """ - Proto representation of the statistics collected upon absl::Mutex - contention, if Envoy is run under :option:`--enable-mutex-tracing`. For - more information, see the `absl::Mutex` - [docs](https://abseil.io/about/design/mutex#extra-features). *NB*: The wait - cycles below are measured by `absl::base_internal::CycleClock`, and may not - correspond to core clock frequency. For more information, see the - `CycleClock` [docs](https://github.com/abseil/abseil- - cpp/blob/master/absl/base/internal/cycleclock.h). - """ - - # The number of individual mutex contentions which have occurred since - # startup. - num_contentions: int = betterproto.uint64_field(1) - # The length of the current contention wait cycle. - current_wait_cycles: int = betterproto.uint64_field(2) - # The lifetime total of all contention wait cycles. - lifetime_wait_cycles: int = betterproto.uint64_field(3) - - -@dataclass(eq=False, repr=False) -class SimpleMetric(betterproto.Message): - """Proto representation of an Envoy Counter or Gauge value.""" - - # Type of the metric represented. - type: "SimpleMetricType" = betterproto.enum_field(1) - # Current metric value. - value: int = betterproto.uint64_field(2) - # Name of the metric. - name: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class Clusters(betterproto.Message): - """ - Admin endpoint uses this wrapper for `/clusters` to display cluster status - information. See :ref:`/clusters ` for - more information. - """ - - # Mapping from cluster name to each cluster's status. - cluster_statuses: List["ClusterStatus"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ClusterStatus(betterproto.Message): - """ - Details an individual cluster's current status. [#next-free-field: 6] - """ - - # Name of the cluster. - name: str = betterproto.string_field(1) - # Denotes whether this cluster was added via API or configured statically. 
- added_via_api: bool = betterproto.bool_field(2) - # The success rate threshold used in the last interval. If :ref:`outlier_dete - # ction.split_external_local_origin_errors` is *false*, all errors: - # externally and locally generated were used to calculate the threshold. If : - # ref:`outlier_detection.split_external_local_origin_errors` is *true*, - # only externally generated errors were used to calculate the threshold. The - # threshold is used to eject hosts based on their success rate. See - # :ref:`Cluster outlier detection ` - # documentation for details. Note: this field may be omitted in any of the - # three following cases: 1. There were not enough hosts with enough request - # volume to proceed with success rate based outlier ejection. 2. The - # threshold is computed to be < 0 because a negative value implies that there - # was no threshold for that interval. 3. Outlier detection is not enabled - # for this cluster. - success_rate_ejection_threshold: "__type__.Percent" = betterproto.message_field(3) - # Mapping from host address to the host's current status. - host_statuses: List["HostStatus"] = betterproto.message_field(4) - # The success rate threshold used in the last interval when only locally - # originated failures were taken into account and externally originated - # errors were treated as success. This field should be interpreted only when - # :ref:`outlier_detection.split_external_local_origin_errors` is *true*. - # The threshold is used to eject hosts based on their success rate. See - # :ref:`Cluster outlier detection ` - # documentation for details. Note: this field may be omitted in any of the - # three following cases: 1. There were not enough hosts with enough request - # volume to proceed with success rate based outlier ejection. 2. The - # threshold is computed to be < 0 because a negative value implies that there - # was no threshold for that interval. 3. Outlier detection is not enabled - # for this cluster. - local_origin_success_rate_ejection_threshold: "__type__.Percent" = ( - betterproto.message_field(5) - ) - - -@dataclass(eq=False, repr=False) -class HostStatus(betterproto.Message): - """Current state of a particular host. [#next-free-field: 10]""" - - # Address of this host. - address: "__api_v2_core__.Address" = betterproto.message_field(1) - # List of stats specific to this host. - stats: List["SimpleMetric"] = betterproto.message_field(2) - # The host's current health status. - health_status: "HostHealthStatus" = betterproto.message_field(3) - # Request success rate for this host over the last calculated interval. If :r - # ef:`outlier_detection.split_external_local_origin_errors` is *false*, all - # errors: externally and locally generated were used in success rate - # calculation. If :ref:`outlier_detection.split_external_local_origin_errors< - # envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors - # >` is *true*, only externally generated errors were used in success rate - # calculation. See :ref:`Cluster outlier detection - # ` documentation for details. Note: the - # message will not be present if host did not have enough request volume to - # calculate success rate or the cluster did not have enough hosts to run - # through success rate outlier ejection. - success_rate: "__type__.Percent" = betterproto.message_field(4) - # The host's weight. If not configured, the value defaults to 1. - weight: int = betterproto.uint32_field(5) - # The hostname of the host, if applicable. 
- hostname: str = betterproto.string_field(6) - # The host's priority. If not configured, the value defaults to 0 (highest - # priority). - priority: int = betterproto.uint32_field(7) - # Request success rate for this host over the last calculated interval when - # only locally originated errors are taken into account and externally - # originated errors were treated as success. This field should be interpreted - # only when :ref:`outlier_detection.split_external_local_origin_errors` is - # *true*. See :ref:`Cluster outlier detection - # ` documentation for details. Note: the - # message will not be present if host did not have enough request volume to - # calculate success rate or the cluster did not have enough hosts to run - # through success rate outlier ejection. - local_origin_success_rate: "__type__.Percent" = betterproto.message_field(8) - # locality of the host. - locality: "__api_v2_core__.Locality" = betterproto.message_field(9) - - -@dataclass(eq=False, repr=False) -class HostHealthStatus(betterproto.Message): - """Health status for a host. [#next-free-field: 7]""" - - # The host is currently failing active health checks. - failed_active_health_check: bool = betterproto.bool_field(1) - # The host is currently considered an outlier and has been ejected. - failed_outlier_check: bool = betterproto.bool_field(2) - # The host is currently being marked as degraded through active health - # checking. - failed_active_degraded_check: bool = betterproto.bool_field(4) - # The host has been removed from service discovery, but is being stabilized - # due to active health checking. - pending_dynamic_removal: bool = betterproto.bool_field(5) - # The host has not yet been health checked. - pending_active_hc: bool = betterproto.bool_field(6) - # Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are - # currently supported here. [#comment:TODO(mrice32): pipe through remaining - # EDS health status possibilities.] - eds_health_status: "__api_v2_core__.HealthStatus" = betterproto.enum_field(3) - - -@dataclass(eq=False, repr=False) -class Memory(betterproto.Message): - """ - Proto representation of the internal memory consumption of an Envoy - instance. These represent values extracted from an internal TCMalloc - instance. For more information, see the section of the docs entitled - ["Generic Tcmalloc - Status"](https://gperftools.github.io/gperftools/tcmalloc.html). [#next- - free-field: 7] - """ - - # The number of bytes allocated by the heap for Envoy. This is an alias for - # `generic.current_allocated_bytes`. - allocated: int = betterproto.uint64_field(1) - # The number of bytes reserved by the heap but not necessarily allocated. - # This is an alias for `generic.heap_size`. - heap_size: int = betterproto.uint64_field(2) - # The number of bytes in free, unmapped pages in the page heap. These bytes - # always count towards virtual memory usage, and depending on the OS, - # typically do not count towards physical memory usage. This is an alias for - # `tcmalloc.pageheap_unmapped_bytes`. - pageheap_unmapped: int = betterproto.uint64_field(3) - # The number of bytes in free, mapped pages in the page heap. These bytes - # always count towards virtual memory usage, and unless the underlying memory - # is swapped out by the OS, they also count towards physical memory usage. - # This is an alias for `tcmalloc.pageheap_free_bytes`. - pageheap_free: int = betterproto.uint64_field(4) - # The amount of memory used by the TCMalloc thread caches (for small - # objects). 
This is an alias for `tcmalloc.current_total_thread_cache_bytes`. - total_thread_cache: int = betterproto.uint64_field(5) - # The number of bytes of the physical memory usage by the allocator. This is - # an alias for `generic.total_physical_bytes`. - total_physical_bytes: int = betterproto.uint64_field(6) - - -@dataclass(eq=False, repr=False) -class ConfigDump(betterproto.Message): - """ - The :ref:`/config_dump ` admin - endpoint uses this wrapper message to maintain and serve arbitrary - configuration information from any component in Envoy. - """ - - # This list is serialized and dumped in its entirety at the - # :ref:`/config_dump ` endpoint. The - # following configurations are currently supported and will be dumped in the - # order given below: * *bootstrap*: :ref:`BootstrapConfigDump - # ` * *clusters*: - # :ref:`ClustersConfigDump ` - # * *listeners*: :ref:`ListenersConfigDump - # ` * *routes*: - # :ref:`RoutesConfigDump ` You - # can filter output with the resource and mask query parameters. See - # :ref:`/config_dump?resource={} - # `, - # :ref:`/config_dump?mask={} - # `, or - # :ref:`/config_dump?resource={},mask={} - # ` for more - # information. - configs: List["betterproto_lib_google_protobuf.Any"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class UpdateFailureState(betterproto.Message): - # What the component configuration would have been if the update had - # succeeded. - failed_configuration: "betterproto_lib_google_protobuf.Any" = ( - betterproto.message_field(1) - ) - # Time of the latest failed update attempt. - last_update_attempt: datetime = betterproto.message_field(2) - # Details about the last failed update attempt. - details: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class BootstrapConfigDump(betterproto.Message): - """ - This message describes the bootstrap configuration that Envoy was started - with. This includes any CLI overrides that were merged. Bootstrap - configuration information can be used to recreate the static portions of an - Envoy configuration by reusing the output as the bootstrap configuration - for another Envoy. - """ - - bootstrap: "__config_bootstrap_v2__.Bootstrap" = betterproto.message_field(1) - # The timestamp when the BootstrapConfig was last updated. - last_updated: datetime = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ListenersConfigDump(betterproto.Message): - """ - Envoy's listener manager fills this message with all currently known - listeners. Listener configuration information can be used to recreate an - Envoy configuration by populating all listeners as static listeners or by - returning them in a LDS response. - """ - - # This is the :ref:`version_info - # ` in the last processed LDS - # discovery response. If there are only static bootstrap listeners, this - # field will be "". - version_info: str = betterproto.string_field(1) - # The statically loaded listener configs. - static_listeners: List[ - "ListenersConfigDumpStaticListener" - ] = betterproto.message_field(2) - # State for any warming, active, or draining listeners. - dynamic_listeners: List[ - "ListenersConfigDumpDynamicListener" - ] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ListenersConfigDumpStaticListener(betterproto.Message): - """Describes a statically loaded listener.""" - - # The listener config. 
- listener: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(1) - # The timestamp when the Listener was last successfully updated. - last_updated: datetime = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ListenersConfigDumpDynamicListenerState(betterproto.Message): - # This is the per-resource version information. This version is currently - # taken from the :ref:`version_info - # ` field at the time that - # the listener was loaded. In the future, discrete per-listener versions may - # be supported by the API. - version_info: str = betterproto.string_field(1) - # The listener config. - listener: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - # The timestamp when the Listener was last successfully updated. - last_updated: datetime = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ListenersConfigDumpDynamicListener(betterproto.Message): - """ - Describes a dynamically loaded listener via the LDS API. [#next-free-field: - 6] - """ - - # The name or unique id of this listener, pulled from the - # DynamicListenerState config. - name: str = betterproto.string_field(1) - # The listener state for any active listener by this name. These are - # listeners that are available to service data plane traffic. - active_state: "ListenersConfigDumpDynamicListenerState" = betterproto.message_field( - 2 - ) - # The listener state for any warming listener by this name. These are - # listeners that are currently undergoing warming in preparation to service - # data plane traffic. Note that if attempting to recreate an Envoy - # configuration from a configuration dump, the warming listeners should - # generally be discarded. - warming_state: "ListenersConfigDumpDynamicListenerState" = ( - betterproto.message_field(3) - ) - # The listener state for any draining listener by this name. These are - # listeners that are currently undergoing draining in preparation to stop - # servicing data plane traffic. Note that if attempting to recreate an Envoy - # configuration from a configuration dump, the draining listeners should - # generally be discarded. - draining_state: "ListenersConfigDumpDynamicListenerState" = ( - betterproto.message_field(4) - ) - # Set if the last update failed, cleared after the next successful update. - error_state: "UpdateFailureState" = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class ClustersConfigDump(betterproto.Message): - """ - Envoy's cluster manager fills this message with all currently known - clusters. Cluster configuration information can be used to recreate an - Envoy configuration by populating all clusters as static clusters or by - returning them in a CDS response. - """ - - # This is the :ref:`version_info - # ` in the last processed CDS - # discovery response. If there are only static bootstrap clusters, this field - # will be "". - version_info: str = betterproto.string_field(1) - # The statically loaded cluster configs. - static_clusters: List[ - "ClustersConfigDumpStaticCluster" - ] = betterproto.message_field(2) - # The dynamically loaded active clusters. These are clusters that are - # available to service data plane traffic. - dynamic_active_clusters: List[ - "ClustersConfigDumpDynamicCluster" - ] = betterproto.message_field(3) - # The dynamically loaded warming clusters. These are clusters that are - # currently undergoing warming in preparation to service data plane traffic. 
- # Note that if attempting to recreate an Envoy configuration from a - # configuration dump, the warming clusters should generally be discarded. - dynamic_warming_clusters: List[ - "ClustersConfigDumpDynamicCluster" - ] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class ClustersConfigDumpStaticCluster(betterproto.Message): - """Describes a statically loaded cluster.""" - - # The cluster config. - cluster: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(1) - # The timestamp when the Cluster was last updated. - last_updated: datetime = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ClustersConfigDumpDynamicCluster(betterproto.Message): - """Describes a dynamically loaded cluster via the CDS API.""" - - # This is the per-resource version information. This version is currently - # taken from the :ref:`version_info - # ` field at the time that - # the cluster was loaded. In the future, discrete per-cluster versions may be - # supported by the API. - version_info: str = betterproto.string_field(1) - # The cluster config. - cluster: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - # The timestamp when the Cluster was last updated. - last_updated: datetime = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class RoutesConfigDump(betterproto.Message): - """ - Envoy's RDS implementation fills this message with all currently loaded - routes, as described by their RouteConfiguration objects. Static routes - that are either defined in the bootstrap configuration or defined inline - while configuring listeners are separated from those configured dynamically - via RDS. Route configuration information can be used to recreate an Envoy - configuration by populating all routes as static routes or by returning - them in RDS responses. - """ - - # The statically loaded route configs. - static_route_configs: List[ - "RoutesConfigDumpStaticRouteConfig" - ] = betterproto.message_field(2) - # The dynamically loaded route configs. - dynamic_route_configs: List[ - "RoutesConfigDumpDynamicRouteConfig" - ] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class RoutesConfigDumpStaticRouteConfig(betterproto.Message): - # The route config. - route_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(1) - # The timestamp when the Route was last updated. - last_updated: datetime = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class RoutesConfigDumpDynamicRouteConfig(betterproto.Message): - # This is the per-resource version information. This version is currently - # taken from the :ref:`version_info - # ` field at the time that - # the route configuration was loaded. - version_info: str = betterproto.string_field(1) - # The route config. - route_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - # The timestamp when the Route was last updated. - last_updated: datetime = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ScopedRoutesConfigDump(betterproto.Message): - """ - Envoy's scoped RDS implementation fills this message with all currently - loaded route configuration scopes (defined via ScopedRouteConfigurationsSet - protos). This message lists both the scopes defined inline with the higher - order object (i.e., the HttpConnectionManager) and the dynamically obtained - scopes via the SRDS API. - """ - - # The statically loaded scoped route configs. 
- inline_scoped_route_configs: List[ - "ScopedRoutesConfigDumpInlineScopedRouteConfigs" - ] = betterproto.message_field(1) - # The dynamically loaded scoped route configs. - dynamic_scoped_route_configs: List[ - "ScopedRoutesConfigDumpDynamicScopedRouteConfigs" - ] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ScopedRoutesConfigDumpInlineScopedRouteConfigs(betterproto.Message): - # The name assigned to the scoped route configurations. - name: str = betterproto.string_field(1) - # The scoped route configurations. - scoped_route_configs: List[ - "betterproto_lib_google_protobuf.Any" - ] = betterproto.message_field(2) - # The timestamp when the scoped route config set was last updated. - last_updated: datetime = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ScopedRoutesConfigDumpDynamicScopedRouteConfigs(betterproto.Message): - # The name assigned to the scoped route configurations. - name: str = betterproto.string_field(1) - # This is the per-resource version information. This version is currently - # taken from the :ref:`version_info - # ` field at the time that - # the scoped routes configuration was loaded. - version_info: str = betterproto.string_field(2) - # The scoped route configurations. - scoped_route_configs: List[ - "betterproto_lib_google_protobuf.Any" - ] = betterproto.message_field(3) - # The timestamp when the scoped route config set was last updated. - last_updated: datetime = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class SecretsConfigDump(betterproto.Message): - """ - Envoys SDS implementation fills this message with all secrets fetched - dynamically via SDS. - """ - - # The statically loaded secrets. - static_secrets: List["SecretsConfigDumpStaticSecret"] = betterproto.message_field(1) - # The dynamically loaded active secrets. These are secrets that are available - # to service clusters or listeners. - dynamic_active_secrets: List[ - "SecretsConfigDumpDynamicSecret" - ] = betterproto.message_field(2) - # The dynamically loaded warming secrets. These are secrets that are - # currently undergoing warming in preparation to service clusters or - # listeners. - dynamic_warming_secrets: List[ - "SecretsConfigDumpDynamicSecret" - ] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class SecretsConfigDumpDynamicSecret(betterproto.Message): - """DynamicSecret contains secret information fetched via SDS.""" - - # The name assigned to the secret. - name: str = betterproto.string_field(1) - # This is the per-resource version information. - version_info: str = betterproto.string_field(2) - # The timestamp when the secret was last updated. - last_updated: datetime = betterproto.message_field(3) - # The actual secret information. Security sensitive information is redacted - # (replaced with "[redacted]") for private keys and passwords in TLS - # certificates. - secret: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class SecretsConfigDumpStaticSecret(betterproto.Message): - """StaticSecret specifies statically loaded secret in bootstrap.""" - - # The name assigned to the secret. - name: str = betterproto.string_field(1) - # The timestamp when the secret was last updated. - last_updated: datetime = betterproto.message_field(2) - # The actual secret information. Security sensitive information is redacted - # (replaced with "[redacted]") for private keys and passwords in TLS - # certificates. 
- secret: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ServerInfo(betterproto.Message): - """ - Proto representation of the value returned by /server_info, containing - server version/server status information. [#next-free-field: 7] - """ - - # Server version. - version: str = betterproto.string_field(1) - # State of the server. - state: "ServerInfoState" = betterproto.enum_field(2) - # Uptime since current epoch was started. - uptime_current_epoch: timedelta = betterproto.message_field(3) - # Uptime since the start of the first epoch. - uptime_all_epochs: timedelta = betterproto.message_field(4) - # Hot restart version. - hot_restart_version: str = betterproto.string_field(5) - # Command line options the server is currently running with. - command_line_options: "CommandLineOptions" = betterproto.message_field(6) - - -@dataclass(eq=False, repr=False) -class CommandLineOptions(betterproto.Message): - """[#next-free-field: 29]""" - - # See :option:`--base-id` for details. - base_id: int = betterproto.uint64_field(1) - # See :option:`--concurrency` for details. - concurrency: int = betterproto.uint32_field(2) - # See :option:`--config-path` for details. - config_path: str = betterproto.string_field(3) - # See :option:`--config-yaml` for details. - config_yaml: str = betterproto.string_field(4) - # See :option:`--allow-unknown-static-fields` for details. - allow_unknown_static_fields: bool = betterproto.bool_field(5) - # See :option:`--reject-unknown-dynamic-fields` for details. - reject_unknown_dynamic_fields: bool = betterproto.bool_field(26) - # See :option:`--admin-address-path` for details. - admin_address_path: str = betterproto.string_field(6) - # See :option:`--local-address-ip-version` for details. - local_address_ip_version: "CommandLineOptionsIpVersion" = betterproto.enum_field(7) - # See :option:`--log-level` for details. - log_level: str = betterproto.string_field(8) - # See :option:`--component-log-level` for details. - component_log_level: str = betterproto.string_field(9) - # See :option:`--log-format` for details. - log_format: str = betterproto.string_field(10) - # See :option:`--log-format-escaped` for details. - log_format_escaped: bool = betterproto.bool_field(27) - # See :option:`--log-path` for details. - log_path: str = betterproto.string_field(11) - # See :option:`--service-cluster` for details. - service_cluster: str = betterproto.string_field(13) - # See :option:`--service-node` for details. - service_node: str = betterproto.string_field(14) - # See :option:`--service-zone` for details. - service_zone: str = betterproto.string_field(15) - # See :option:`--file-flush-interval-msec` for details. - file_flush_interval: timedelta = betterproto.message_field(16) - # See :option:`--drain-time-s` for details. - drain_time: timedelta = betterproto.message_field(17) - # See :option:`--parent-shutdown-time-s` for details. - parent_shutdown_time: timedelta = betterproto.message_field(18) - # See :option:`--mode` for details. - mode: "CommandLineOptionsMode" = betterproto.enum_field(19) - # max_stats and max_obj_name_len are now unused and have no effect. - max_stats: int = betterproto.uint64_field(20) - max_obj_name_len: int = betterproto.uint64_field(21) - # See :option:`--disable-hot-restart` for details. - disable_hot_restart: bool = betterproto.bool_field(22) - # See :option:`--enable-mutex-tracing` for details. 
- enable_mutex_tracing: bool = betterproto.bool_field(23) - # See :option:`--restart-epoch` for details. - restart_epoch: int = betterproto.uint32_field(24) - # See :option:`--cpuset-threads` for details. - cpuset_threads: bool = betterproto.bool_field(25) - # See :option:`--disable-extensions` for details. - disabled_extensions: List[str] = betterproto.string_field(28) - - def __post_init__(self) -> None: - super().__post_init__() - if self.max_stats: - warnings.warn( - "CommandLineOptions.max_stats is deprecated", DeprecationWarning - ) - if self.max_obj_name_len: - warnings.warn( - "CommandLineOptions.max_obj_name_len is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class TapRequest(betterproto.Message): - """ - The /tap admin request body that is used to configure an active tap - session. - """ - - # The opaque configuration ID used to match the configuration to a loaded - # extension. A tap extension configures a similar opaque ID that is used to - # match. - config_id: str = betterproto.string_field(1) - # The tap configuration to load. - tap_config: "__service_tap_v2_alpha__.TapConfig" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Listeners(betterproto.Message): - """ - Admin endpoint uses this wrapper for `/listeners` to display listener - status information. See :ref:`/listeners - ` for more information. - """ - - # List of listener statuses. - listener_statuses: List["ListenerStatus"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ListenerStatus(betterproto.Message): - """Details an individual listener's current status.""" - - # Name of the listener - name: str = betterproto.string_field(1) - # The actual local address that the listener is listening on. If a listener - # was configured to listen on port 0, then this address has the port that was - # allocated by the OS. - local_address: "__api_v2_core__.Address" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Certificates(betterproto.Message): - """ - Proto representation of certificate details. Admin endpoint uses this - wrapper for `/certs` to display certificate information. See :ref:`/certs - ` for more information. - """ - - # List of certificates known to an Envoy. - certificates: List["Certificate"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class Certificate(betterproto.Message): - # Details of CA certificate. - ca_cert: List["CertificateDetails"] = betterproto.message_field(1) - # Details of Certificate Chain - cert_chain: List["CertificateDetails"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class CertificateDetails(betterproto.Message): - """[#next-free-field: 7]""" - - # Path of the certificate. - path: str = betterproto.string_field(1) - # Certificate Serial Number. - serial_number: str = betterproto.string_field(2) - # List of Subject Alternate names. - subject_alt_names: List["SubjectAlternateName"] = betterproto.message_field(3) - # Minimum of days until expiration of certificate and it's chain. - days_until_expiration: int = betterproto.uint64_field(4) - # Indicates the time from which the certificate is valid. - valid_from: datetime = betterproto.message_field(5) - # Indicates the time at which the certificate expires. 
- expiration_time: datetime = betterproto.message_field(6) - - -@dataclass(eq=False, repr=False) -class SubjectAlternateName(betterproto.Message): - dns: str = betterproto.string_field(1, group="name") - uri: str = betterproto.string_field(2, group="name") - ip_address: str = betterproto.string_field(3, group="name") - - -from ... import type as __type__ -from ...api.v2 import core as __api_v2_core__ -from ...config.bootstrap import v2 as __config_bootstrap_v2__ -from ...service.tap import v2alpha as __service_tap_v2_alpha__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/admin/v3/__init__.py b/src/envoy_data_plane/envoy/admin/v3/__init__.py deleted file mode 100644 index 54f916b..0000000 --- a/src/envoy_data_plane/envoy/admin/v3/__init__.py +++ /dev/null @@ -1,943 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/admin/v3/certs.proto, envoy/admin/v3/clusters.proto, envoy/admin/v3/config_dump.proto, envoy/admin/v3/init_dump.proto, envoy/admin/v3/listeners.proto, envoy/admin/v3/memory.proto, envoy/admin/v3/metrics.proto, envoy/admin/v3/mutex_stats.proto, envoy/admin/v3/server_info.proto, envoy/admin/v3/tap.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import datetime, timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class SimpleMetricType(betterproto.Enum): - COUNTER = 0 - GAUGE = 1 - - -class ClientResourceStatus(betterproto.Enum): - """ - Resource status from the view of a xDS client, which tells the - synchronization status between the xDS client and the xDS server. - """ - - # Resource status is not available/unknown. - UNKNOWN = 0 - # Client requested this resource but hasn't received any update from - # management server. The client will not fail requests, but will queue them - # until update arrives or the client times out waiting for the resource. - REQUESTED = 1 - # This resource has been requested by the client but has either not been - # delivered by the server or was previously delivered by the server and then - # subsequently removed from resources provided by the server. For more - # information, please refer to the :ref:`"Knowing When a Requested Resource - # Does Not Exist" ` section. - DOES_NOT_EXIST = 2 - # Client received this resource and replied with ACK. - ACKED = 3 - # Client received this resource and replied with NACK. - NACKED = 4 - - -class ServerInfoState(betterproto.Enum): - LIVE = 0 - DRAINING = 1 - PRE_INITIALIZING = 2 - INITIALIZING = 3 - - -class CommandLineOptionsIpVersion(betterproto.Enum): - v4 = 0 - v6 = 1 - - -class CommandLineOptionsMode(betterproto.Enum): - Serve = 0 - Validate = 1 - InitOnly = 2 - - -class CommandLineOptionsDrainStrategy(betterproto.Enum): - Gradual = 0 - Immediate = 1 - - -@dataclass(eq=False, repr=False) -class MutexStats(betterproto.Message): - """ - Proto representation of the statistics collected upon absl::Mutex - contention, if Envoy is run under :option:`--enable-mutex-tracing`. For - more information, see the `absl::Mutex` - [docs](https://abseil.io/about/design/mutex#extra-features). *NB*: The wait - cycles below are measured by `absl::base_internal::CycleClock`, and may not - correspond to core clock frequency. For more information, see the - `CycleClock` [docs](https://github.com/abseil/abseil- - cpp/blob/master/absl/base/internal/cycleclock.h). 
- """ - - # The number of individual mutex contentions which have occurred since - # startup. - num_contentions: int = betterproto.uint64_field(1) - # The length of the current contention wait cycle. - current_wait_cycles: int = betterproto.uint64_field(2) - # The lifetime total of all contention wait cycles. - lifetime_wait_cycles: int = betterproto.uint64_field(3) - - -@dataclass(eq=False, repr=False) -class SimpleMetric(betterproto.Message): - """Proto representation of an Envoy Counter or Gauge value.""" - - # Type of the metric represented. - type: "SimpleMetricType" = betterproto.enum_field(1) - # Current metric value. - value: int = betterproto.uint64_field(2) - # Name of the metric. - name: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class Clusters(betterproto.Message): - """ - Admin endpoint uses this wrapper for `/clusters` to display cluster status - information. See :ref:`/clusters ` for - more information. - """ - - # Mapping from cluster name to each cluster's status. - cluster_statuses: List["ClusterStatus"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ClusterStatus(betterproto.Message): - """ - Details an individual cluster's current status. [#next-free-field: 8] - """ - - # Name of the cluster. - name: str = betterproto.string_field(1) - # Denotes whether this cluster was added via API or configured statically. - added_via_api: bool = betterproto.bool_field(2) - # The success rate threshold used in the last interval. If :ref:`outlier_dete - # ction.split_external_local_origin_errors` is *false*, all - # errors: externally and locally generated were used to calculate the - # threshold. If :ref:`outlier_detection.split_external_local_origin_errors` is *true*, only externally generated errors were used to - # calculate the threshold. The threshold is used to eject hosts based on - # their success rate. See :ref:`Cluster outlier detection - # ` documentation for details. Note: this - # field may be omitted in any of the three following cases: 1. There were not - # enough hosts with enough request volume to proceed with success rate based - # outlier ejection. 2. The threshold is computed to be < 0 because a negative - # value implies that there was no threshold for that interval. 3. Outlier - # detection is not enabled for this cluster. - success_rate_ejection_threshold: "__type_v3__.Percent" = betterproto.message_field( - 3 - ) - # Mapping from host address to the host's current status. - host_statuses: List["HostStatus"] = betterproto.message_field(4) - # The success rate threshold used in the last interval when only locally - # originated failures were taken into account and externally originated - # errors were treated as success. This field should be interpreted only when - # :ref:`outlier_detection.split_external_local_origin_errors` - # is *true*. The threshold is used to eject hosts based on their success - # rate. See :ref:`Cluster outlier detection - # ` documentation for details. Note: this - # field may be omitted in any of the three following cases: 1. There were not - # enough hosts with enough request volume to proceed with success rate based - # outlier ejection. 2. The threshold is computed to be < 0 because a negative - # value implies that there was no threshold for that interval. 3. Outlier - # detection is not enabled for this cluster. 
- local_origin_success_rate_ejection_threshold: "__type_v3__.Percent" = ( - betterproto.message_field(5) - ) - # :ref:`Circuit breaking ` settings of the - # cluster. - circuit_breakers: "__config_cluster_v3__.CircuitBreakers" = ( - betterproto.message_field(6) - ) - # Observability name of the cluster. - observability_name: str = betterproto.string_field(7) - - -@dataclass(eq=False, repr=False) -class HostStatus(betterproto.Message): - """Current state of a particular host. [#next-free-field: 10]""" - - # Address of this host. - address: "__config_core_v3__.Address" = betterproto.message_field(1) - # List of stats specific to this host. - stats: List["SimpleMetric"] = betterproto.message_field(2) - # The host's current health status. - health_status: "HostHealthStatus" = betterproto.message_field(3) - # Request success rate for this host over the last calculated interval. If :r - # ef:`outlier_detection.split_external_local_origin_errors` is - # *false*, all errors: externally and locally generated were used in success - # rate calculation. If :ref:`outlier_detection.split_external_local_origin_er - # rors` is *true*, only externally generated errors were used - # in success rate calculation. See :ref:`Cluster outlier detection - # ` documentation for details. Note: the - # message will not be present if host did not have enough request volume to - # calculate success rate or the cluster did not have enough hosts to run - # through success rate outlier ejection. - success_rate: "__type_v3__.Percent" = betterproto.message_field(4) - # The host's weight. If not configured, the value defaults to 1. - weight: int = betterproto.uint32_field(5) - # The hostname of the host, if applicable. - hostname: str = betterproto.string_field(6) - # The host's priority. If not configured, the value defaults to 0 (highest - # priority). - priority: int = betterproto.uint32_field(7) - # Request success rate for this host over the last calculated interval when - # only locally originated errors are taken into account and externally - # originated errors were treated as success. This field should be interpreted - # only when :ref:`outlier_detection.split_external_local_origin_errors` is *true*. See :ref:`Cluster outlier detection - # ` documentation for details. Note: the - # message will not be present if host did not have enough request volume to - # calculate success rate or the cluster did not have enough hosts to run - # through success rate outlier ejection. - local_origin_success_rate: "__type_v3__.Percent" = betterproto.message_field(8) - # locality of the host. - locality: "__config_core_v3__.Locality" = betterproto.message_field(9) - - -@dataclass(eq=False, repr=False) -class HostHealthStatus(betterproto.Message): - """Health status for a host. [#next-free-field: 9]""" - - # The host is currently failing active health checks. - failed_active_health_check: bool = betterproto.bool_field(1) - # The host is currently considered an outlier and has been ejected. - failed_outlier_check: bool = betterproto.bool_field(2) - # The host is currently being marked as degraded through active health - # checking. - failed_active_degraded_check: bool = betterproto.bool_field(4) - # The host has been removed from service discovery, but is being stabilized - # due to active health checking. - pending_dynamic_removal: bool = betterproto.bool_field(5) - # The host has not yet been health checked. - pending_active_hc: bool = betterproto.bool_field(6) - # The host should be excluded from panic, spillover, etc. 
calculations - # because it was explicitly taken out of rotation via protocol signal and is - # not meant to be routed to. - excluded_via_immediate_hc_fail: bool = betterproto.bool_field(7) - # The host failed active HC due to timeout. - active_hc_timeout: bool = betterproto.bool_field(8) - # Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are - # currently supported here. [#comment:TODO(mrice32): pipe through remaining - # EDS health status possibilities.] - eds_health_status: "__config_core_v3__.HealthStatus" = betterproto.enum_field(3) - - -@dataclass(eq=False, repr=False) -class Memory(betterproto.Message): - """ - Proto representation of the internal memory consumption of an Envoy - instance. These represent values extracted from an internal TCMalloc - instance. For more information, see the section of the docs entitled - ["Generic Tcmalloc - Status"](https://gperftools.github.io/gperftools/tcmalloc.html). [#next- - free-field: 7] - """ - - # The number of bytes allocated by the heap for Envoy. This is an alias for - # `generic.current_allocated_bytes`. - allocated: int = betterproto.uint64_field(1) - # The number of bytes reserved by the heap but not necessarily allocated. - # This is an alias for `generic.heap_size`. - heap_size: int = betterproto.uint64_field(2) - # The number of bytes in free, unmapped pages in the page heap. These bytes - # always count towards virtual memory usage, and depending on the OS, - # typically do not count towards physical memory usage. This is an alias for - # `tcmalloc.pageheap_unmapped_bytes`. - pageheap_unmapped: int = betterproto.uint64_field(3) - # The number of bytes in free, mapped pages in the page heap. These bytes - # always count towards virtual memory usage, and unless the underlying memory - # is swapped out by the OS, they also count towards physical memory usage. - # This is an alias for `tcmalloc.pageheap_free_bytes`. - pageheap_free: int = betterproto.uint64_field(4) - # The amount of memory used by the TCMalloc thread caches (for small - # objects). This is an alias for `tcmalloc.current_total_thread_cache_bytes`. - total_thread_cache: int = betterproto.uint64_field(5) - # The number of bytes of the physical memory usage by the allocator. This is - # an alias for `generic.total_physical_bytes`. - total_physical_bytes: int = betterproto.uint64_field(6) - - -@dataclass(eq=False, repr=False) -class ConfigDump(betterproto.Message): - """ - The :ref:`/config_dump ` admin - endpoint uses this wrapper message to maintain and serve arbitrary - configuration information from any component in Envoy. - """ - - # This list is serialized and dumped in its entirety at the - # :ref:`/config_dump ` endpoint. The - # following configurations are currently supported and will be dumped in the - # order given below: * *bootstrap*: :ref:`BootstrapConfigDump - # ` * *clusters*: - # :ref:`ClustersConfigDump ` * - # *endpoints*: :ref:`EndpointsConfigDump - # ` * *listeners*: - # :ref:`ListenersConfigDump ` - # * *scoped_routes*: :ref:`ScopedRoutesConfigDump - # ` * *routes*: - # :ref:`RoutesConfigDump ` * - # *secrets*: :ref:`SecretsConfigDump - # ` EDS Configuration will only - # be dumped by using parameter `?include_eds` You can filter output with the - # resource and mask query parameters. See :ref:`/config_dump?resource={} - # `, - # :ref:`/config_dump?mask={} - # `, or - # :ref:`/config_dump?resource={},mask={} - # ` for more - # information. 
- configs: List["betterproto_lib_google_protobuf.Any"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class UpdateFailureState(betterproto.Message): - # What the component configuration would have been if the update had - # succeeded. This field may not be populated by xDS clients due to storage - # overhead. - failed_configuration: "betterproto_lib_google_protobuf.Any" = ( - betterproto.message_field(1) - ) - # Time of the latest failed update attempt. - last_update_attempt: datetime = betterproto.message_field(2) - # Details about the last failed update attempt. - details: str = betterproto.string_field(3) - # This is the version of the rejected resource. [#not-implemented-hide:] - version_info: str = betterproto.string_field(4) - - -@dataclass(eq=False, repr=False) -class BootstrapConfigDump(betterproto.Message): - """ - This message describes the bootstrap configuration that Envoy was started - with. This includes any CLI overrides that were merged. Bootstrap - configuration information can be used to recreate the static portions of an - Envoy configuration by reusing the output as the bootstrap configuration - for another Envoy. - """ - - bootstrap: "__config_bootstrap_v3__.Bootstrap" = betterproto.message_field(1) - # The timestamp when the BootstrapConfig was last updated. - last_updated: datetime = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ListenersConfigDump(betterproto.Message): - """ - Envoy's listener manager fills this message with all currently known - listeners. Listener configuration information can be used to recreate an - Envoy configuration by populating all listeners as static listeners or by - returning them in a LDS response. - """ - - # This is the :ref:`version_info - # ` - # in the last processed LDS discovery response. If there are only static - # bootstrap listeners, this field will be "". - version_info: str = betterproto.string_field(1) - # The statically loaded listener configs. - static_listeners: List[ - "ListenersConfigDumpStaticListener" - ] = betterproto.message_field(2) - # State for any warming, active, or draining listeners. - dynamic_listeners: List[ - "ListenersConfigDumpDynamicListener" - ] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ListenersConfigDumpStaticListener(betterproto.Message): - """Describes a statically loaded listener.""" - - # The listener config. - listener: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(1) - # The timestamp when the Listener was last successfully updated. - last_updated: datetime = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ListenersConfigDumpDynamicListenerState(betterproto.Message): - # This is the per-resource version information. This version is currently - # taken from the :ref:`version_info - # ` - # field at the time that the listener was loaded. In the future, discrete - # per-listener versions may be supported by the API. - version_info: str = betterproto.string_field(1) - # The listener config. - listener: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - # The timestamp when the Listener was last successfully updated. - last_updated: datetime = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ListenersConfigDumpDynamicListener(betterproto.Message): - """ - Describes a dynamically loaded listener via the LDS API. 
[#next-free-field: - 7] - """ - - # The name or unique id of this listener, pulled from the - # DynamicListenerState config. - name: str = betterproto.string_field(1) - # The listener state for any active listener by this name. These are - # listeners that are available to service data plane traffic. - active_state: "ListenersConfigDumpDynamicListenerState" = betterproto.message_field( - 2 - ) - # The listener state for any warming listener by this name. These are - # listeners that are currently undergoing warming in preparation to service - # data plane traffic. Note that if attempting to recreate an Envoy - # configuration from a configuration dump, the warming listeners should - # generally be discarded. - warming_state: "ListenersConfigDumpDynamicListenerState" = ( - betterproto.message_field(3) - ) - # The listener state for any draining listener by this name. These are - # listeners that are currently undergoing draining in preparation to stop - # servicing data plane traffic. Note that if attempting to recreate an Envoy - # configuration from a configuration dump, the draining listeners should - # generally be discarded. - draining_state: "ListenersConfigDumpDynamicListenerState" = ( - betterproto.message_field(4) - ) - # Set if the last update failed, cleared after the next successful update. - # The *error_state* field contains the rejected version of this particular - # resource along with the reason and timestamp. For successfully updated or - # acknowledged resource, this field should be empty. - error_state: "UpdateFailureState" = betterproto.message_field(5) - # The client status of this resource. [#not-implemented-hide:] - client_status: "ClientResourceStatus" = betterproto.enum_field(6) - - -@dataclass(eq=False, repr=False) -class ClustersConfigDump(betterproto.Message): - """ - Envoy's cluster manager fills this message with all currently known - clusters. Cluster configuration information can be used to recreate an - Envoy configuration by populating all clusters as static clusters or by - returning them in a CDS response. - """ - - # This is the :ref:`version_info - # ` - # in the last processed CDS discovery response. If there are only static - # bootstrap clusters, this field will be "". - version_info: str = betterproto.string_field(1) - # The statically loaded cluster configs. - static_clusters: List[ - "ClustersConfigDumpStaticCluster" - ] = betterproto.message_field(2) - # The dynamically loaded active clusters. These are clusters that are - # available to service data plane traffic. - dynamic_active_clusters: List[ - "ClustersConfigDumpDynamicCluster" - ] = betterproto.message_field(3) - # The dynamically loaded warming clusters. These are clusters that are - # currently undergoing warming in preparation to service data plane traffic. - # Note that if attempting to recreate an Envoy configuration from a - # configuration dump, the warming clusters should generally be discarded. - dynamic_warming_clusters: List[ - "ClustersConfigDumpDynamicCluster" - ] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class ClustersConfigDumpStaticCluster(betterproto.Message): - """Describes a statically loaded cluster.""" - - # The cluster config. - cluster: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(1) - # The timestamp when the Cluster was last updated. 
- last_updated: datetime = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ClustersConfigDumpDynamicCluster(betterproto.Message): - """ - Describes a dynamically loaded cluster via the CDS API. [#next-free-field: - 6] - """ - - # This is the per-resource version information. This version is currently - # taken from the :ref:`version_info - # ` - # field at the time that the cluster was loaded. In the future, discrete per- - # cluster versions may be supported by the API. - version_info: str = betterproto.string_field(1) - # The cluster config. - cluster: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - # The timestamp when the Cluster was last updated. - last_updated: datetime = betterproto.message_field(3) - # Set if the last update failed, cleared after the next successful update. - # The *error_state* field contains the rejected version of this particular - # resource along with the reason and timestamp. For successfully updated or - # acknowledged resource, this field should be empty. [#not-implemented-hide:] - error_state: "UpdateFailureState" = betterproto.message_field(4) - # The client status of this resource. [#not-implemented-hide:] - client_status: "ClientResourceStatus" = betterproto.enum_field(5) - - -@dataclass(eq=False, repr=False) -class RoutesConfigDump(betterproto.Message): - """ - Envoy's RDS implementation fills this message with all currently loaded - routes, as described by their RouteConfiguration objects. Static routes - that are either defined in the bootstrap configuration or defined inline - while configuring listeners are separated from those configured dynamically - via RDS. Route configuration information can be used to recreate an Envoy - configuration by populating all routes as static routes or by returning - them in RDS responses. - """ - - # The statically loaded route configs. - static_route_configs: List[ - "RoutesConfigDumpStaticRouteConfig" - ] = betterproto.message_field(2) - # The dynamically loaded route configs. - dynamic_route_configs: List[ - "RoutesConfigDumpDynamicRouteConfig" - ] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class RoutesConfigDumpStaticRouteConfig(betterproto.Message): - # The route config. - route_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(1) - # The timestamp when the Route was last updated. - last_updated: datetime = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class RoutesConfigDumpDynamicRouteConfig(betterproto.Message): - """[#next-free-field: 6]""" - - # This is the per-resource version information. This version is currently - # taken from the :ref:`version_info - # ` - # field at the time that the route configuration was loaded. - version_info: str = betterproto.string_field(1) - # The route config. - route_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - # The timestamp when the Route was last updated. - last_updated: datetime = betterproto.message_field(3) - # Set if the last update failed, cleared after the next successful update. - # The *error_state* field contains the rejected version of this particular - # resource along with the reason and timestamp. For successfully updated or - # acknowledged resource, this field should be empty. [#not-implemented-hide:] - error_state: "UpdateFailureState" = betterproto.message_field(4) - # The client status of this resource. 
[#not-implemented-hide:] - client_status: "ClientResourceStatus" = betterproto.enum_field(5) - - -@dataclass(eq=False, repr=False) -class ScopedRoutesConfigDump(betterproto.Message): - """ - Envoy's scoped RDS implementation fills this message with all currently - loaded route configuration scopes (defined via ScopedRouteConfigurationsSet - protos). This message lists both the scopes defined inline with the higher - order object (i.e., the HttpConnectionManager) and the dynamically obtained - scopes via the SRDS API. - """ - - # The statically loaded scoped route configs. - inline_scoped_route_configs: List[ - "ScopedRoutesConfigDumpInlineScopedRouteConfigs" - ] = betterproto.message_field(1) - # The dynamically loaded scoped route configs. - dynamic_scoped_route_configs: List[ - "ScopedRoutesConfigDumpDynamicScopedRouteConfigs" - ] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ScopedRoutesConfigDumpInlineScopedRouteConfigs(betterproto.Message): - # The name assigned to the scoped route configurations. - name: str = betterproto.string_field(1) - # The scoped route configurations. - scoped_route_configs: List[ - "betterproto_lib_google_protobuf.Any" - ] = betterproto.message_field(2) - # The timestamp when the scoped route config set was last updated. - last_updated: datetime = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ScopedRoutesConfigDumpDynamicScopedRouteConfigs(betterproto.Message): - """[#next-free-field: 7]""" - - # The name assigned to the scoped route configurations. - name: str = betterproto.string_field(1) - # This is the per-resource version information. This version is currently - # taken from the :ref:`version_info - # ` - # field at the time that the scoped routes configuration was loaded. - version_info: str = betterproto.string_field(2) - # The scoped route configurations. - scoped_route_configs: List[ - "betterproto_lib_google_protobuf.Any" - ] = betterproto.message_field(3) - # The timestamp when the scoped route config set was last updated. - last_updated: datetime = betterproto.message_field(4) - # Set if the last update failed, cleared after the next successful update. - # The *error_state* field contains the rejected version of this particular - # resource along with the reason and timestamp. For successfully updated or - # acknowledged resource, this field should be empty. [#not-implemented-hide:] - error_state: "UpdateFailureState" = betterproto.message_field(5) - # The client status of this resource. [#not-implemented-hide:] - client_status: "ClientResourceStatus" = betterproto.enum_field(6) - - -@dataclass(eq=False, repr=False) -class SecretsConfigDump(betterproto.Message): - """ - Envoys SDS implementation fills this message with all secrets fetched - dynamically via SDS. - """ - - # The statically loaded secrets. - static_secrets: List["SecretsConfigDumpStaticSecret"] = betterproto.message_field(1) - # The dynamically loaded active secrets. These are secrets that are available - # to service clusters or listeners. - dynamic_active_secrets: List[ - "SecretsConfigDumpDynamicSecret" - ] = betterproto.message_field(2) - # The dynamically loaded warming secrets. These are secrets that are - # currently undergoing warming in preparation to service clusters or - # listeners. 
- dynamic_warming_secrets: List[ - "SecretsConfigDumpDynamicSecret" - ] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class SecretsConfigDumpDynamicSecret(betterproto.Message): - """ - DynamicSecret contains secret information fetched via SDS. [#next-free- - field: 7] - """ - - # The name assigned to the secret. - name: str = betterproto.string_field(1) - # This is the per-resource version information. - version_info: str = betterproto.string_field(2) - # The timestamp when the secret was last updated. - last_updated: datetime = betterproto.message_field(3) - # The actual secret information. Security sensitive information is redacted - # (replaced with "[redacted]") for private keys and passwords in TLS - # certificates. - secret: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(4) - # Set if the last update failed, cleared after the next successful update. - # The *error_state* field contains the rejected version of this particular - # resource along with the reason and timestamp. For successfully updated or - # acknowledged resource, this field should be empty. [#not-implemented-hide:] - error_state: "UpdateFailureState" = betterproto.message_field(5) - # The client status of this resource. [#not-implemented-hide:] - client_status: "ClientResourceStatus" = betterproto.enum_field(6) - - -@dataclass(eq=False, repr=False) -class SecretsConfigDumpStaticSecret(betterproto.Message): - """StaticSecret specifies statically loaded secret in bootstrap.""" - - # The name assigned to the secret. - name: str = betterproto.string_field(1) - # The timestamp when the secret was last updated. - last_updated: datetime = betterproto.message_field(2) - # The actual secret information. Security sensitive information is redacted - # (replaced with "[redacted]") for private keys and passwords in TLS - # certificates. - secret: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class EndpointsConfigDump(betterproto.Message): - """ - Envoy's admin fill this message with all currently known endpoints. - Endpoint configuration information can be used to recreate an Envoy - configuration by populating all endpoints as static endpoints or by - returning them in an EDS response. - """ - - # The statically loaded endpoint configs. - static_endpoint_configs: List[ - "EndpointsConfigDumpStaticEndpointConfig" - ] = betterproto.message_field(2) - # The dynamically loaded endpoint configs. - dynamic_endpoint_configs: List[ - "EndpointsConfigDumpDynamicEndpointConfig" - ] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class EndpointsConfigDumpStaticEndpointConfig(betterproto.Message): - # The endpoint config. - endpoint_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 1 - ) - # [#not-implemented-hide:] The timestamp when the Endpoint was last updated. - last_updated: datetime = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class EndpointsConfigDumpDynamicEndpointConfig(betterproto.Message): - """[#next-free-field: 6]""" - - # [#not-implemented-hide:] This is the per-resource version information. This - # version is currently taken from the :ref:`version_info - # ` - # field at the time that the endpoint configuration was loaded. - version_info: str = betterproto.string_field(1) - # The endpoint config. 
- endpoint_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 2 - ) - # [#not-implemented-hide:] The timestamp when the Endpoint was last updated. - last_updated: datetime = betterproto.message_field(3) - # Set if the last update failed, cleared after the next successful update. - # The *error_state* field contains the rejected version of this particular - # resource along with the reason and timestamp. For successfully updated or - # acknowledged resource, this field should be empty. [#not-implemented-hide:] - error_state: "UpdateFailureState" = betterproto.message_field(4) - # The client status of this resource. [#not-implemented-hide:] - client_status: "ClientResourceStatus" = betterproto.enum_field(5) - - -@dataclass(eq=False, repr=False) -class UnreadyTargetsDumps(betterproto.Message): - """ - Dumps of unready targets of envoy init managers. Envoy's admin fills this - message with init managers, which provides the information of their unready - targets. The :ref:`/init_dump ` will - dump all unready targets information. - """ - - # You can choose specific component to dump unready targets with mask query - # parameter. See :ref:`/init_dump?mask={} - # ` for more information. The - # dumps of unready targets of all init managers. - unready_targets_dumps: List[ - "UnreadyTargetsDumpsUnreadyTargetsDump" - ] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class UnreadyTargetsDumpsUnreadyTargetsDump(betterproto.Message): - """Message of unready targets information of an init manager.""" - - # Name of the init manager. Example: "init_manager_xxx". - name: str = betterproto.string_field(1) - # Names of unready targets of the init manager. Example: "target_xxx". - target_names: List[str] = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class ServerInfo(betterproto.Message): - """ - Proto representation of the value returned by /server_info, containing - server version/server status information. [#next-free-field: 8] - """ - - # Server version. - version: str = betterproto.string_field(1) - # State of the server. - state: "ServerInfoState" = betterproto.enum_field(2) - # Uptime since current epoch was started. - uptime_current_epoch: timedelta = betterproto.message_field(3) - # Uptime since the start of the first epoch. - uptime_all_epochs: timedelta = betterproto.message_field(4) - # Hot restart version. - hot_restart_version: str = betterproto.string_field(5) - # Command line options the server is currently running with. - command_line_options: "CommandLineOptions" = betterproto.message_field(6) - # Populated node identity of this server. - node: "__config_core_v3__.Node" = betterproto.message_field(7) - - -@dataclass(eq=False, repr=False) -class CommandLineOptions(betterproto.Message): - """[#next-free-field: 39]""" - - # See :option:`--base-id` for details. - base_id: int = betterproto.uint64_field(1) - # See :option:`--use-dynamic-base-id` for details. - use_dynamic_base_id: bool = betterproto.bool_field(31) - # See :option:`--base-id-path` for details. - base_id_path: str = betterproto.string_field(32) - # See :option:`--concurrency` for details. - concurrency: int = betterproto.uint32_field(2) - # See :option:`--config-path` for details. - config_path: str = betterproto.string_field(3) - # See :option:`--config-yaml` for details. - config_yaml: str = betterproto.string_field(4) - # See :option:`--allow-unknown-static-fields` for details. 
- allow_unknown_static_fields: bool = betterproto.bool_field(5) - # See :option:`--reject-unknown-dynamic-fields` for details. - reject_unknown_dynamic_fields: bool = betterproto.bool_field(26) - # See :option:`--ignore-unknown-dynamic-fields` for details. - ignore_unknown_dynamic_fields: bool = betterproto.bool_field(30) - # See :option:`--admin-address-path` for details. - admin_address_path: str = betterproto.string_field(6) - # See :option:`--local-address-ip-version` for details. - local_address_ip_version: "CommandLineOptionsIpVersion" = betterproto.enum_field(7) - # See :option:`--log-level` for details. - log_level: str = betterproto.string_field(8) - # See :option:`--component-log-level` for details. - component_log_level: str = betterproto.string_field(9) - # See :option:`--log-format` for details. - log_format: str = betterproto.string_field(10) - # See :option:`--log-format-escaped` for details. - log_format_escaped: bool = betterproto.bool_field(27) - # See :option:`--log-path` for details. - log_path: str = betterproto.string_field(11) - # See :option:`--service-cluster` for details. - service_cluster: str = betterproto.string_field(13) - # See :option:`--service-node` for details. - service_node: str = betterproto.string_field(14) - # See :option:`--service-zone` for details. - service_zone: str = betterproto.string_field(15) - # See :option:`--file-flush-interval-msec` for details. - file_flush_interval: timedelta = betterproto.message_field(16) - # See :option:`--drain-time-s` for details. - drain_time: timedelta = betterproto.message_field(17) - # See :option:`--drain-strategy` for details. - drain_strategy: "CommandLineOptionsDrainStrategy" = betterproto.enum_field(33) - # See :option:`--parent-shutdown-time-s` for details. - parent_shutdown_time: timedelta = betterproto.message_field(18) - # See :option:`--mode` for details. - mode: "CommandLineOptionsMode" = betterproto.enum_field(19) - # See :option:`--disable-hot-restart` for details. - disable_hot_restart: bool = betterproto.bool_field(22) - # See :option:`--enable-mutex-tracing` for details. - enable_mutex_tracing: bool = betterproto.bool_field(23) - # See :option:`--restart-epoch` for details. - restart_epoch: int = betterproto.uint32_field(24) - # See :option:`--cpuset-threads` for details. - cpuset_threads: bool = betterproto.bool_field(25) - # See :option:`--disable-extensions` for details. - disabled_extensions: List[str] = betterproto.string_field(28) - # See :option:`--enable-fine-grain-logging` for details. - enable_fine_grain_logging: bool = betterproto.bool_field(34) - # See :option:`--socket-path` for details. - socket_path: str = betterproto.string_field(35) - # See :option:`--socket-mode` for details. - socket_mode: int = betterproto.uint32_field(36) - # See :option:`--enable-core-dump` for details. - enable_core_dump: bool = betterproto.bool_field(37) - # See :option:`--stats-tag` for details. - stats_tag: List[str] = betterproto.string_field(38) - - -@dataclass(eq=False, repr=False) -class TapRequest(betterproto.Message): - """ - The /tap admin request body that is used to configure an active tap - session. - """ - - # The opaque configuration ID used to match the configuration to a loaded - # extension. A tap extension configures a similar opaque ID that is used to - # match. - config_id: str = betterproto.string_field(1) - # The tap configuration to load. 
- tap_config: "__config_tap_v3__.TapConfig" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Listeners(betterproto.Message): - """ - Admin endpoint uses this wrapper for `/listeners` to display listener - status information. See :ref:`/listeners - ` for more information. - """ - - # List of listener statuses. - listener_statuses: List["ListenerStatus"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ListenerStatus(betterproto.Message): - """Details an individual listener's current status.""" - - # Name of the listener - name: str = betterproto.string_field(1) - # The actual local address that the listener is listening on. If a listener - # was configured to listen on port 0, then this address has the port that was - # allocated by the OS. - local_address: "__config_core_v3__.Address" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Certificates(betterproto.Message): - """ - Proto representation of certificate details. Admin endpoint uses this - wrapper for `/certs` to display certificate information. See :ref:`/certs - ` for more information. - """ - - # List of certificates known to an Envoy. - certificates: List["Certificate"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class Certificate(betterproto.Message): - # Details of CA certificate. - ca_cert: List["CertificateDetails"] = betterproto.message_field(1) - # Details of Certificate Chain - cert_chain: List["CertificateDetails"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class CertificateDetails(betterproto.Message): - """[#next-free-field: 8]""" - - # Path of the certificate. - path: str = betterproto.string_field(1) - # Certificate Serial Number. - serial_number: str = betterproto.string_field(2) - # List of Subject Alternate names. - subject_alt_names: List["SubjectAlternateName"] = betterproto.message_field(3) - # Minimum of days until expiration of certificate and it's chain. - days_until_expiration: int = betterproto.uint64_field(4) - # Indicates the time from which the certificate is valid. - valid_from: datetime = betterproto.message_field(5) - # Indicates the time at which the certificate expires. - expiration_time: datetime = betterproto.message_field(6) - # Details related to the OCSP response associated with this certificate, if - # any. - ocsp_details: "CertificateDetailsOcspDetails" = betterproto.message_field(7) - - -@dataclass(eq=False, repr=False) -class CertificateDetailsOcspDetails(betterproto.Message): - # Indicates the time from which the OCSP response is valid. - valid_from: datetime = betterproto.message_field(1) - # Indicates the time at which the OCSP response expires. 
- expiration: datetime = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class SubjectAlternateName(betterproto.Message): - dns: str = betterproto.string_field(1, group="name") - uri: str = betterproto.string_field(2, group="name") - ip_address: str = betterproto.string_field(3, group="name") - - -from ...config.bootstrap import v3 as __config_bootstrap_v3__ -from ...config.cluster import v3 as __config_cluster_v3__ -from ...config.core import v3 as __config_core_v3__ -from ...config.tap import v3 as __config_tap_v3__ -from ...type import v3 as __type_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/annotations/__init__.py b/src/envoy_data_plane/envoy/annotations/__init__.py deleted file mode 100644 index 7e98236..0000000 --- a/src/envoy_data_plane/envoy/annotations/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/annotations/deprecation.proto, envoy/annotations/resource.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ResourceAnnotation(betterproto.Message): - # Annotation for xDS services that indicates the fully-qualified Protobuf - # type for the resource type. - type: str = betterproto.string_field(1) diff --git a/src/envoy_data_plane/envoy/api/__init__.py b/src/envoy_data_plane/envoy/api/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/api/v2/__init__.py b/src/envoy_data_plane/envoy/api/v2/__init__.py deleted file mode 100644 index 710a235..0000000 --- a/src/envoy_data_plane/envoy/api/v2/__init__.py +++ /dev/null @@ -1,2197 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/api/v2/cds.proto, envoy/api/v2/cluster.proto, envoy/api/v2/discovery.proto, envoy/api/v2/eds.proto, envoy/api/v2/endpoint.proto, envoy/api/v2/lds.proto, envoy/api/v2/listener.proto, envoy/api/v2/rds.proto, envoy/api/v2/route.proto, envoy/api/v2/scoped_route.proto, envoy/api/v2/srds.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Optional, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -class ClusterDiscoveryType(betterproto.Enum): - STATIC = 0 - STRICT_DNS = 1 - LOGICAL_DNS = 2 - EDS = 3 - ORIGINAL_DST = 4 - - -class ClusterLbPolicy(betterproto.Enum): - ROUND_ROBIN = 0 - LEAST_REQUEST = 1 - RING_HASH = 2 - RANDOM = 3 - ORIGINAL_DST_LB = 4 - MAGLEV = 5 - CLUSTER_PROVIDED = 6 - LOAD_BALANCING_POLICY_CONFIG = 7 - - -class ClusterDnsLookupFamily(betterproto.Enum): - AUTO = 0 - V4_ONLY = 1 - V6_ONLY = 2 - - -class ClusterClusterProtocolSelection(betterproto.Enum): - USE_CONFIGURED_PROTOCOL = 0 - USE_DOWNSTREAM_PROTOCOL = 1 - - -class ClusterLbSubsetConfigLbSubsetFallbackPolicy(betterproto.Enum): - NO_FALLBACK = 0 - ANY_ENDPOINT = 1 - DEFAULT_SUBSET = 2 - - -class ClusterLbSubsetConfigLbSubsetSelectorLbSubsetSelectorFallbackPolicy( - betterproto.Enum -): - NOT_DEFINED = 0 - NO_FALLBACK = 1 - ANY_ENDPOINT = 2 - DEFAULT_SUBSET = 3 - KEYS_SUBSET = 4 - - -class ClusterRingHashLbConfigHashFunction(betterproto.Enum): - XX_HASH = 0 - MURMUR_HASH_2 = 1 - - -class ListenerDrainType(betterproto.Enum): - DEFAULT = 0 - MODIFY_ONLY = 1 - - -@dataclass(eq=False, repr=False) -class ClusterLoadAssignment(betterproto.Message): - """ - Each route from RDS will map to a single cluster or traffic split across - clusters using weights expressed in the RDS WeightedCluster. With EDS, each - cluster is treated independently from a LB perspective, with LB taking - place between the Localities within a cluster and at a finer granularity - between the hosts within a locality. The percentage of traffic for each - endpoint is determined by both its load_balancing_weight, and the - load_balancing_weight of its locality. First, a locality will be selected, - then an endpoint within that locality will be chose based on its weight. - [#next-free-field: 6] - """ - - # Name of the cluster. This will be the :ref:`service_name - # ` value if specified - # in the cluster :ref:`EdsClusterConfig - # `. - cluster_name: str = betterproto.string_field(1) - # List of endpoints to load balance to. - endpoints: List["endpoint.LocalityLbEndpoints"] = betterproto.message_field(2) - # Map of named endpoints that can be referenced in LocalityLbEndpoints. - # [#not-implemented-hide:] - named_endpoints: Dict[str, "endpoint.Endpoint"] = betterproto.map_field( - 5, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - # Load balancing policy settings. - policy: "ClusterLoadAssignmentPolicy" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class ClusterLoadAssignmentPolicy(betterproto.Message): - """Load balancing policy settings. [#next-free-field: 6]""" - - # Action to trim the overall incoming traffic to protect the upstream hosts. - # This action allows protection in case the hosts are unable to recover from - # an outage, or unable to autoscale or unable to handle incoming traffic - # volume for any reason. 
At the client each category is applied one after the - # other to generate the 'actual' drop percentage on all outgoing traffic. For - # example: .. code-block:: json { "drop_overloads": [ { "category": - # "throttle", "drop_percentage": 60 } { "category": "lb", - # "drop_percentage": 50 } ]} The actual drop percentages applied to the - # traffic at the clients will be "throttle"_drop = 60% "lb"_drop = 20% - # // 50% of the remaining 'actual' load, which is 40%. - # actual_outgoing_load = 20% // remaining after applying all categories. - # [#not-implemented-hide:] - drop_overloads: List[ - "ClusterLoadAssignmentPolicyDropOverload" - ] = betterproto.message_field(2) - # Priority levels and localities are considered overprovisioned with this - # factor (in percentage). This means that we don't consider a priority level - # or locality unhealthy until the percentage of healthy hosts multiplied by - # the overprovisioning factor drops below 100. With the default value - # 140(1.4), Envoy doesn't consider a priority level or a locality unhealthy - # until their percentage of healthy hosts drops below 72%. For example: .. - # code-block:: json { "overprovisioning_factor": 100 } Read more at - # :ref:`priority levels ` and - # :ref:`localities `. - overprovisioning_factor: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - # The max time until which the endpoints from this assignment can be used. If - # no new assignments are received before this time expires the endpoints are - # considered stale and should be marked unhealthy. Defaults to 0 which means - # endpoints never go stale. - endpoint_stale_after: timedelta = betterproto.message_field(4) - # The flag to disable overprovisioning. If it is set to true, - # :ref:`overprovisioning factor - # ` will be ignored and - # Envoy will not perform graceful failover between priority levels or - # localities as endpoints become unhealthy. Otherwise Envoy will perform - # graceful failover as :ref:`overprovisioning factor - # ` suggests. [#not- - # implemented-hide:] - disable_overprovisioning: bool = betterproto.bool_field(5) - - def __post_init__(self) -> None: - super().__post_init__() - if self.disable_overprovisioning: - warnings.warn( - "ClusterLoadAssignmentPolicy.disable_overprovisioning is deprecated", - DeprecationWarning, - ) - - -@dataclass(eq=False, repr=False) -class ClusterLoadAssignmentPolicyDropOverload(betterproto.Message): - """[#not-implemented-hide:]""" - - # Identifier for the policy specifying the drop. - category: str = betterproto.string_field(1) - # Percentage of traffic that should be dropped for the category. - drop_percentage: "__type__.FractionalPercent" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Cluster(betterproto.Message): - """Configuration for a single upstream cluster. [#next-free-field: 48]""" - - # Configuration to use different transport sockets for different endpoints. - # The entry of *envoy.transport_socket_match* in the - # :ref:`LbEndpoint.Metadata ` - # is used to match against the transport sockets as they appear in the list. - # The first :ref:`match ` is - # used. For example, with the following match .. code-block:: yaml - # transport_socket_matches: - name: "enableMTLS" match: acceptMTLS: - # true transport_socket: name: envoy.transport_sockets.tls - # config: { ... 
} # tls socket configuration - name: "defaultToPlaintext" - # match: {} transport_socket: name: - # envoy.transport_sockets.raw_buffer Connections to the endpoints whose - # metadata value under *envoy.transport_socket_match* having - # "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket - # configuration. If a :ref:`socket match - # ` with empty match criteria is - # provided, that always match any endpoint. For example, the - # "defaultToPlaintext" socket match in case above. If an endpoint metadata's - # value under *envoy.transport_socket_match* does not match any - # *TransportSocketMatch*, socket configuration fallbacks to use the - # *tls_context* or *transport_socket* specified in this cluster. This field - # allows gradual and flexible transport socket configuration changes. The - # metadata of endpoints in EDS can indicate transport socket capabilities. - # For example, an endpoint's metadata can have two key value pairs as - # "acceptMTLS": "true", "acceptPlaintext": "true". While some other - # endpoints, only accepting plaintext traffic has "acceptPlaintext": "true" - # metadata information. Then the xDS server can configure the CDS to a - # client, Envoy A, to send mutual TLS traffic for endpoints with - # "acceptMTLS": "true", by adding a corresponding *TransportSocketMatch* in - # this field. Other client Envoys receive CDS without - # *transport_socket_match* set, and still send plain text traffic to the same - # cluster. [#comment:TODO(incfly): add a detailed architecture doc on - # intended usage.] - transport_socket_matches: List[ - "ClusterTransportSocketMatch" - ] = betterproto.message_field(43) - # Supplies the name of the cluster which must be unique across all clusters. - # The cluster name is used when emitting :ref:`statistics - # ` if :ref:`alt_stat_name - # ` is not provided. Any ``:`` in the - # cluster name will be converted to ``_`` when emitting statistics. - name: str = betterproto.string_field(1) - # An optional alternative to the cluster name to be used while emitting - # stats. Any ``:`` in the name will be converted to ``_`` when emitting - # statistics. This should not be confused with :ref:`Router Filter Header - # `. - alt_stat_name: str = betterproto.string_field(28) - # The :ref:`service discovery type ` - # to use for resolving the cluster. - type: "ClusterDiscoveryType" = betterproto.enum_field( - 2, group="cluster_discovery_type" - ) - # The custom cluster type. - cluster_type: "ClusterCustomClusterType" = betterproto.message_field( - 38, group="cluster_discovery_type" - ) - # Configuration to use for EDS updates for the Cluster. - eds_cluster_config: "ClusterEdsClusterConfig" = betterproto.message_field(3) - # The timeout for new network connections to hosts in the cluster. - connect_timeout: timedelta = betterproto.message_field(4) - # Soft limit on size of the cluster’s connections read and write buffers. If - # unspecified, an implementation defined default is applied (1MiB). - per_connection_buffer_limit_bytes: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) - # The :ref:`load balancer type ` to use - # when picking a host in the cluster. - lb_policy: "ClusterLbPolicy" = betterproto.enum_field(6) - # If the service discovery type is - # :ref:`STATIC`, - # :ref:`STRICT_DNS` or - # :ref:`LOGICAL_DNS`, - # then hosts is required. .. attention:: **This field is deprecated**. Set - # the :ref:`load_assignment` field - # instead. 
- hosts: List["core.Address"] = betterproto.message_field(7) - # Setting this is required for specifying members of - # :ref:`STATIC`, - # :ref:`STRICT_DNS` or - # :ref:`LOGICAL_DNS` - # clusters. This field supersedes the *hosts* field in the v2 API. .. - # attention:: Setting this allows non-EDS cluster types to contain embedded - # EDS equivalent :ref:`endpoint - # assignments`. - load_assignment: "ClusterLoadAssignment" = betterproto.message_field(33) - # Optional :ref:`active health checking ` - # configuration for the cluster. If no configuration is specified no health - # checking will be done and all cluster members will be considered healthy at - # all times. - health_checks: List["core.HealthCheck"] = betterproto.message_field(8) - # Optional maximum requests for a single upstream connection. This parameter - # is respected by both the HTTP/1.1 and HTTP/2 connection pool - # implementations. If not specified, there is no limit. Setting this - # parameter to 1 will effectively disable keep alive. - max_requests_per_connection: Optional[int] = betterproto.message_field( - 9, wraps=betterproto.TYPE_UINT32 - ) - # Optional :ref:`circuit breaking ` for the - # cluster. - circuit_breakers: "cluster.CircuitBreakers" = betterproto.message_field(10) - # The TLS configuration for connections to the upstream cluster. .. - # attention:: **This field is deprecated**. Use `transport_socket` with - # name `tls` instead. If both are set, `transport_socket` takes priority. - tls_context: "auth.UpstreamTlsContext" = betterproto.message_field(11) - # HTTP protocol options that are applied only to upstream HTTP connections. - # These options apply to all HTTP versions. - upstream_http_protocol_options: "core.UpstreamHttpProtocolOptions" = ( - betterproto.message_field(46) - ) - # Additional options when handling HTTP requests upstream. These options will - # be applicable to both HTTP1 and HTTP2 requests. - common_http_protocol_options: "core.HttpProtocolOptions" = ( - betterproto.message_field(29) - ) - # Additional options when handling HTTP1 requests. - http_protocol_options: "core.Http1ProtocolOptions" = betterproto.message_field(13) - # Even if default HTTP2 protocol options are desired, this field must be set - # so that Envoy will assume that the upstream supports HTTP/2 when making new - # HTTP connection pool connections. Currently, Envoy only supports prior - # knowledge for upstream connections. Even if TLS is used with ALPN, - # `http2_protocol_options` must be specified. As an aside this allows HTTP/2 - # connections to happen over plain text. - http2_protocol_options: "core.Http2ProtocolOptions" = betterproto.message_field(14) - # The extension_protocol_options field is used to provide extension-specific - # protocol options for upstream connections. The key should match the - # extension filter name, such as "envoy.filters.network.thrift_proxy". See - # the extension's documentation for details on specific options. - extension_protocol_options: Dict[ - str, "betterproto_lib_google_protobuf.Struct" - ] = betterproto.map_field(35, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # The extension_protocol_options field is used to provide extension-specific - # protocol options for upstream connections. The key should match the - # extension filter name, such as "envoy.filters.network.thrift_proxy". See - # the extension's documentation for details on specific options. 
- typed_extension_protocol_options: Dict[ - str, "betterproto_lib_google_protobuf.Any" - ] = betterproto.map_field(36, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # If the DNS refresh rate is specified and the cluster type is either - # :ref:`STRICT_DNS`, - # or - # :ref:`LOGICAL_DNS`, - # this value is used as the cluster’s DNS refresh rate. The value configured - # must be at least 1ms. If this setting is not specified, the value defaults - # to 5000ms. For cluster types other than - # :ref:`STRICT_DNS` - # and - # :ref:`LOGICAL_DNS` - # this setting is ignored. - dns_refresh_rate: timedelta = betterproto.message_field(16) - # If the DNS failure refresh rate is specified and the cluster type is either - # :ref:`STRICT_DNS`, - # or - # :ref:`LOGICAL_DNS`, - # this is used as the cluster’s DNS refresh rate when requests are failing. - # If this setting is not specified, the failure refresh rate defaults to the - # DNS refresh rate. For cluster types other than - # :ref:`STRICT_DNS` - # and - # :ref:`LOGICAL_DNS` - # this setting is ignored. - dns_failure_refresh_rate: "ClusterRefreshRate" = betterproto.message_field(44) - # Optional configuration for setting cluster's DNS refresh rate. If the value - # is set to true, cluster's DNS refresh rate will be set to resource record's - # TTL which comes from DNS resolution. - respect_dns_ttl: bool = betterproto.bool_field(39) - # The DNS IP address resolution policy. If this setting is not specified, the - # value defaults to - # :ref:`AUTO`. - dns_lookup_family: "ClusterDnsLookupFamily" = betterproto.enum_field(17) - # If DNS resolvers are specified and the cluster type is either - # :ref:`STRICT_DNS`, - # or - # :ref:`LOGICAL_DNS`, - # this value is used to specify the cluster’s dns resolvers. If this setting - # is not specified, the value defaults to the default resolver, which uses - # /etc/resolv.conf for configuration. For cluster types other than - # :ref:`STRICT_DNS` - # and - # :ref:`LOGICAL_DNS` - # this setting is ignored. Setting this value causes failure if the - # ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is - # true during server startup. Apple's API only allows overriding DNS - # resolvers via system settings. - dns_resolvers: List["core.Address"] = betterproto.message_field(18) - # [#next-major-version: Reconcile DNS options in a single message.] Always - # use TCP queries instead of UDP queries for DNS lookups. Setting this value - # causes failure if the - # ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is - # true during server startup. Apple' API only uses UDP for DNS resolution. - use_tcp_for_dns_lookups: bool = betterproto.bool_field(45) - # If specified, outlier detection will be enabled for this upstream cluster. - # Each of the configuration values can be overridden via :ref:`runtime values - # `. - outlier_detection: "cluster.OutlierDetection" = betterproto.message_field(19) - # The interval for removing stale hosts from a cluster type :ref:`ORIGINAL_DS - # T`. Hosts are - # considered stale if they have not been used as upstream destinations during - # this interval. New hosts are added to original destination clusters on - # demand as new connections are redirected to Envoy, causing the number of - # hosts in the cluster to grow over time. 
Hosts that are not stale (they are - # actively used as destinations) are kept in the cluster, which allows - # connections to them remain open, saving the latency that would otherwise be - # spent on opening new connections. If this setting is not specified, the - # value defaults to 5000ms. For cluster types other than :ref:`ORIGINAL_DST` this setting is - # ignored. - cleanup_interval: timedelta = betterproto.message_field(20) - # Optional configuration used to bind newly established upstream connections. - # This overrides any bind_config specified in the bootstrap proto. If the - # address and port are empty, no bind will be performed. - upstream_bind_config: "core.BindConfig" = betterproto.message_field(21) - # Configuration for load balancing subsetting. - lb_subset_config: "ClusterLbSubsetConfig" = betterproto.message_field(22) - # Optional configuration for the Ring Hash load balancing policy. - ring_hash_lb_config: "ClusterRingHashLbConfig" = betterproto.message_field( - 23, group="lb_config" - ) - # Optional configuration for the Original Destination load balancing policy. - original_dst_lb_config: "ClusterOriginalDstLbConfig" = betterproto.message_field( - 34, group="lb_config" - ) - # Optional configuration for the LeastRequest load balancing policy. - least_request_lb_config: "ClusterLeastRequestLbConfig" = betterproto.message_field( - 37, group="lb_config" - ) - # Common configuration for all load balancer implementations. - common_lb_config: "ClusterCommonLbConfig" = betterproto.message_field(27) - # Optional custom transport socket implementation to use for upstream - # connections. To setup TLS, set a transport socket with name `tls` and - # :ref:`UpstreamTlsContexts ` in the - # `typed_config`. If no transport socket configuration is specified, new - # connections will be set up with plaintext. - transport_socket: "core.TransportSocket" = betterproto.message_field(24) - # The Metadata field can be used to provide additional information about the - # cluster. It can be used for stats, logging, and varying filter behavior. - # Fields should use reverse DNS notation to denote which entity within Envoy - # will need the information. For instance, if the metadata is intended for - # the Router filter, the filter name should be specified as - # *envoy.filters.http.router*. - metadata: "core.Metadata" = betterproto.message_field(25) - # Determines how Envoy selects the protocol used to speak to upstream hosts. - protocol_selection: "ClusterClusterProtocolSelection" = betterproto.enum_field(26) - # Optional options for upstream connections. - upstream_connection_options: "UpstreamConnectionOptions" = ( - betterproto.message_field(30) - ) - # If an upstream host becomes unhealthy (as determined by the configured - # health checks or outlier detection), immediately close all connections to - # the failed host. .. note:: This is currently only supported for - # connections created by tcp_proxy. .. note:: The current implementation of - # this feature closes all connections immediately when the unhealthy status - # is detected. If there are a large number of connections open to an - # upstream host that becomes unhealthy, Envoy may spend a substantial amount - # of time exclusively closing these connections, and not processing any - # other traffic. - close_connections_on_host_health_failure: bool = betterproto.bool_field(31) - # If set to true, Envoy will ignore the health value of a host when - # processing its removal from service discovery. 
This means that if active - # health checking is used, Envoy will *not* wait for the endpoint to go - # unhealthy before removing it. - drain_connections_on_host_removal: bool = betterproto.bool_field(32) - # An (optional) network filter chain, listed in the order the filters should - # be applied. The chain will be applied to all outgoing connections that - # Envoy makes to the upstream servers of this cluster. - filters: List["cluster.Filter"] = betterproto.message_field(40) - # [#not-implemented-hide:] New mechanism for LB policy configuration. Used - # only if the :ref:`lb_policy` field has - # the value :ref:`LOAD_BALANCING_POLICY_CONFIG`. - load_balancing_policy: "LoadBalancingPolicy" = betterproto.message_field(41) - # [#not-implemented-hide:] If present, tells the client where to send load - # reports via LRS. If not present, the client will fall back to a client-side - # default, which may be either (a) don't send any load reports or (b) send - # load reports for all clusters to a single default server (which may be - # configured in the bootstrap file). Note that if multiple clusters point to - # the same LRS server, the client may choose to create a separate stream for - # each cluster or it may choose to coalesce the data for multiple clusters - # onto a single stream. Either way, the client must make sure to send the - # data for any given cluster on no more than one stream. [#next-major- - # version: In the v3 API, we should consider restructuring this somehow, - # maybe by allowing LRS to go on the ADS stream, or maybe by moving some of - # the negotiation from the LRS stream here.] - lrs_server: "core.ConfigSource" = betterproto.message_field(42) - # If track_timeout_budgets is true, the :ref:`timeout budget histograms - # ` will be published - # for each request. These show what percentage of a request's per try and - # global timeout was used. A value of 0 would indicate that none of the - # timeout was used or that the timeout was infinite. A value of 100 would - # indicate that the request took the entirety of the timeout given to it. - track_timeout_budgets: bool = betterproto.bool_field(47) - - def __post_init__(self) -> None: - super().__post_init__() - if self.hosts: - warnings.warn("Cluster.hosts is deprecated", DeprecationWarning) - if self.tls_context: - warnings.warn("Cluster.tls_context is deprecated", DeprecationWarning) - if self.extension_protocol_options: - warnings.warn( - "Cluster.extension_protocol_options is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class ClusterTransportSocketMatch(betterproto.Message): - """ - TransportSocketMatch specifies what transport socket config will be used - when the match conditions are satisfied. - """ - - # The name of the match, used in stats generation. - name: str = betterproto.string_field(1) - # Optional endpoint metadata match criteria. The connection to the endpoint - # with metadata matching what is set in this field will use the transport - # socket configuration specified here. The endpoint's metadata entry in - # *envoy.transport_socket_match* is used to match against the values - # specified in this field. - match: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(2) - # The configuration of the transport socket. - transport_socket: "core.TransportSocket" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ClusterCustomClusterType(betterproto.Message): - """Extended cluster type.""" - - # The type of the cluster to instantiate. 
The name must match a supported - # cluster type. - name: str = betterproto.string_field(1) - # Cluster specific configuration which depends on the cluster being - # instantiated. See the supported cluster for further documentation. - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ClusterEdsClusterConfig(betterproto.Message): - """Only valid when discovery type is EDS.""" - - # Configuration for the source of EDS updates for this Cluster. - eds_config: "core.ConfigSource" = betterproto.message_field(1) - # Optional alternative to cluster name to present to EDS. This does not have - # the same restrictions as cluster name, i.e. it may be arbitrary length. - service_name: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class ClusterLbSubsetConfig(betterproto.Message): - """ - Optionally divide the endpoints in this cluster into subsets defined by - endpoint metadata and selected by route and weighted cluster metadata. - [#next-free-field: 8] - """ - - # The behavior used when no endpoint subset matches the selected route's - # metadata. The value defaults to :ref:`NO_FALLBACK`. - fallback_policy: "ClusterLbSubsetConfigLbSubsetFallbackPolicy" = ( - betterproto.enum_field(1) - ) - # Specifies the default subset of endpoints used during fallback if - # fallback_policy is :ref:`DEFAULT_SUBSET`. Each field in - # default_subset is compared to the matching LbEndpoint.Metadata under the - # *envoy.lb* namespace. It is valid for no hosts to match, in which case the - # behavior is the same as a fallback_policy of :ref:`NO_FALLBACK`. - default_subset: "betterproto_lib_google_protobuf.Struct" = ( - betterproto.message_field(2) - ) - # For each entry, LbEndpoint.Metadata's *envoy.lb* namespace is traversed and - # a subset is created for each unique combination of key and value. For - # example: .. code-block:: json { "subset_selectors": [ { "keys": [ - # "version" ] }, { "keys": [ "stage", "hardware_type" ] } ]} A subset - # is matched when the metadata from the selected route and weighted cluster - # contains the same keys and values as the subset's metadata. The same host - # may appear in multiple subsets. - subset_selectors: List[ - "ClusterLbSubsetConfigLbSubsetSelector" - ] = betterproto.message_field(3) - # If true, routing to subsets will take into account the localities and - # locality weights of the endpoints when making the routing decision. There - # are some potential pitfalls associated with enabling this feature, as the - # resulting traffic split after applying both a subset match and locality - # weights might be undesirable. Consider for example a situation in which you - # have 50/50 split across two localities X/Y which have 100 hosts each - # without subsetting. If the subset LB results in X having only 1 host - # selected but Y having 100, then a lot more load is being dumped on the - # single host in X than originally anticipated in the load balancing - # assignment delivered via EDS. - locality_weight_aware: bool = betterproto.bool_field(4) - # When used with locality_weight_aware, scales the weight of each locality by - # the ratio of hosts in the subset vs hosts in the original subset. This aims - # to even out the load going to an individual locality if said locality is - # disproportionately affected by the subset predicate. 
- scale_locality_weight: bool = betterproto.bool_field(5) - # If true, when a fallback policy is configured and its corresponding subset - # fails to find a host this will cause any host to be selected instead. This - # is useful when using the default subset as the fallback policy, given the - # default subset might become empty. With this option enabled, if that - # happens the LB will attempt to select a host from the entire cluster. - panic_mode_any: bool = betterproto.bool_field(6) - # If true, metadata specified for a metadata key will be matched against the - # corresponding endpoint metadata if the endpoint metadata matches the value - # exactly OR it is a list value and any of the elements in the list matches - # the criteria. - list_as_any: bool = betterproto.bool_field(7) - - -@dataclass(eq=False, repr=False) -class ClusterLbSubsetConfigLbSubsetSelector(betterproto.Message): - """Specifications for subsets.""" - - # List of keys to match with the weighted cluster metadata. - keys: List[str] = betterproto.string_field(1) - # The behavior used when no endpoint subset matches the selected route's - # metadata. - fallback_policy: "ClusterLbSubsetConfigLbSubsetSelectorLbSubsetSelectorFallbackPolicy" = betterproto.enum_field( - 2 - ) - # Subset of - # :ref:`keys` - # used by :ref:`KEYS_SUBSET` fallback policy. - # It has to be a non empty list if KEYS_SUBSET fallback policy is selected. - # For any other fallback policy the parameter is not used and should not be - # set. Only values also present in - # :ref:`keys` - # are allowed, but `fallback_keys_subset` cannot be equal to `keys`. - fallback_keys_subset: List[str] = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class ClusterLeastRequestLbConfig(betterproto.Message): - """Specific configuration for the LeastRequest load balancing policy.""" - - # The number of random healthy hosts from which the host with the fewest - # active requests will be chosen. Defaults to 2 so that we perform two-choice - # selection if the field is not set. - choice_count: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class ClusterRingHashLbConfig(betterproto.Message): - """ - Specific configuration for the - :ref:`RingHash` load - balancing policy. - """ - - # Minimum hash ring size. The larger the ring is (that is, the more hashes - # there are for each provided host) the better the request distribution will - # reflect the desired weights. Defaults to 1024 entries, and limited to 8M - # entries. See also :ref:`maximum_ring_size`. - minimum_ring_size: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT64 - ) - # The hash function used to hash hosts onto the ketama ring. The value - # defaults to :ref:`XX_HASH`. - hash_function: "ClusterRingHashLbConfigHashFunction" = betterproto.enum_field(3) - # Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, - # but can be lowered to further constrain resource use. See also :ref:`minimu - # m_ring_size`. - maximum_ring_size: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT64 - ) - - -@dataclass(eq=False, repr=False) -class ClusterOriginalDstLbConfig(betterproto.Message): - """ - Specific configuration for the :ref:`Original Destination - ` load balancing - policy. - """ - - # When true, :ref:`x-envoy-original-dst-host ` can be used to override destination address. .. 
- # attention:: This header isn't sanitized by default, so enabling this - # feature allows HTTP clients to route traffic to arbitrary hosts and/or - # ports, which may have serious security consequences. .. note:: If the - # header appears multiple times only the first value is used. - use_http_header: bool = betterproto.bool_field(1) - - -@dataclass(eq=False, repr=False) -class ClusterCommonLbConfig(betterproto.Message): - """ - Common configuration for all load balancer implementations. [#next-free- - field: 8] - """ - - # Configures the :ref:`healthy panic threshold - # `. If not specified, the - # default is 50%. To disable panic mode, set to 0%. .. note:: The specified - # percent will be truncated to the nearest 1%. - healthy_panic_threshold: "__type__.Percent" = betterproto.message_field(1) - zone_aware_lb_config: "ClusterCommonLbConfigZoneAwareLbConfig" = ( - betterproto.message_field(2, group="locality_config_specifier") - ) - locality_weighted_lb_config: "ClusterCommonLbConfigLocalityWeightedLbConfig" = ( - betterproto.message_field(3, group="locality_config_specifier") - ) - # If set, all health check/weight/metadata updates that happen within this - # duration will be merged and delivered in one shot when the duration - # expires. The start of the duration is when the first update happens. This - # is useful for big clusters, with potentially noisy deploys that might - # trigger excessive CPU usage due to a constant stream of healthcheck state - # changes or metadata updates. The first set of updates to be seen apply - # immediately (e.g.: a new cluster). Please always keep in mind that the use - # of sandbox technologies may change this behavior. If this is not set, we - # default to a merge window of 1000ms. To disable it, set the merge window to - # 0. Note: merging does not apply to cluster membership changes (e.g.: - # adds/removes); this is because merging those updates isn't currently safe. - # See https://github.com/envoyproxy/envoy/pull/3941. - update_merge_window: timedelta = betterproto.message_field(4) - # If set to true, Envoy will not consider new hosts when computing load - # balancing weights until they have been health checked for the first time. - # This will have no effect unless active health checking is also configured. - # Ignoring a host means that for any load balancing calculations that adjust - # weights based on the ratio of eligible hosts and total hosts (priority - # spillover, locality weighting and panic mode) Envoy will exclude these - # hosts in the denominator. For example, with hosts in two priorities P0 and - # P1, where P0 looks like {healthy, unhealthy (new), unhealthy (new)} and - # where P1 looks like {healthy, healthy} all traffic will still hit P0, as 1 - # / (3 - 2) = 1. Enabling this will allow scaling up the number of hosts for - # a given cluster without entering panic mode or triggering priority - # spillover, assuming the hosts pass the first health check. If panic mode is - # triggered, new hosts are still eligible for traffic; they simply do not - # contribute to the calculation when deciding whether panic mode is enabled - # or not. - ignore_new_hosts_until_first_hc: bool = betterproto.bool_field(5) - # If set to `true`, the cluster manager will drain all existing connections - # to upstream hosts whenever hosts are added or removed from the cluster. - close_connections_on_host_set_change: bool = betterproto.bool_field(6) - # Common Configuration for all consistent hashing load balancers (MaglevLb, - # RingHashLb, etc.) 
- consistent_hashing_lb_config: "ClusterCommonLbConfigConsistentHashingLbConfig" = ( - betterproto.message_field(7) - ) - - -@dataclass(eq=False, repr=False) -class ClusterCommonLbConfigZoneAwareLbConfig(betterproto.Message): - """ - Configuration for :ref:`zone aware routing - `. - """ - - # Configures percentage of requests that will be considered for zone aware - # routing if zone aware routing is configured. If not specified, the default - # is 100%. * :ref:`runtime values - # `. * :ref:`Zone aware - # routing support `. - routing_enabled: "__type__.Percent" = betterproto.message_field(1) - # Configures minimum upstream cluster size required for zone aware routing If - # upstream cluster size is less than specified, zone aware routing is not - # performed even if zone aware routing is configured. If not specified, the - # default is 6. * :ref:`runtime values - # `. * :ref:`Zone aware - # routing support `. - min_cluster_size: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT64 - ) - # If set to true, Envoy will not consider any hosts when the cluster is in - # :ref:`panic mode`. Instead, - # the cluster will fail all requests as if all hosts are unhealthy. This can - # help avoid potentially overwhelming a failing service. - fail_traffic_on_panic: bool = betterproto.bool_field(3) - - -@dataclass(eq=False, repr=False) -class ClusterCommonLbConfigLocalityWeightedLbConfig(betterproto.Message): - """ - Configuration for :ref:`locality weighted load balancing - ` - """ - - pass - - -@dataclass(eq=False, repr=False) -class ClusterCommonLbConfigConsistentHashingLbConfig(betterproto.Message): - """ - Common Configuration for all consistent hashing load balancers (MaglevLb, - RingHashLb, etc.) - """ - - # If set to `true`, the cluster will use hostname instead of the resolved - # address as the key to consistently hash to an upstream host. Only valid for - # StrictDNS clusters with hostnames which resolve to a single IP address. - use_hostname_for_hashing: bool = betterproto.bool_field(1) - - -@dataclass(eq=False, repr=False) -class ClusterRefreshRate(betterproto.Message): - # Specifies the base interval between refreshes. This parameter is required - # and must be greater than zero and less than :ref:`max_interval - # `. - base_interval: timedelta = betterproto.message_field(1) - # Specifies the maximum interval between refreshes. This parameter is - # optional, but must be greater than or equal to the :ref:`base_interval - # ` if set. The default - # is 10 times the :ref:`base_interval - # `. - max_interval: timedelta = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class LoadBalancingPolicy(betterproto.Message): - """ - [#not-implemented-hide:] Extensible load balancing policy configuration. - Every LB policy defined via this mechanism will be identified via a unique - name using reverse DNS notation. If the policy needs configuration - parameters, it must define a message for its own configuration, which will - be stored in the config field. The name of the policy will tell clients - which type of message they should expect to see in the config field. Note - that there are cases where it is useful to be able to independently select - LB policies for choosing a locality and for choosing an endpoint within - that locality. 
For example, a given deployment may always use the same - policy to choose the locality, but for choosing the endpoint within the - locality, some clusters may use weighted-round-robin, while others may use - some sort of session-based balancing. This can be accomplished via - hierarchical LB policies, where the parent LB policy creates a child LB - policy for each locality. For each request, the parent chooses the locality - and then delegates to the child policy for that locality to choose the - endpoint within the locality. To facilitate this, the config message for - the top-level LB policy may include a field of type LoadBalancingPolicy - that specifies the child policy. - """ - - # Each client will iterate over the list in order and stop at the first - # policy that it supports. This provides a mechanism for starting to use new - # LB policies that are not yet supported by all clients. - policies: List["LoadBalancingPolicyPolicy"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class LoadBalancingPolicyPolicy(betterproto.Message): - # Required. The name of the LB policy. - name: str = betterproto.string_field(1) - # Optional config for the LB policy. No more than one of these two fields may - # be populated. - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(2) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(3) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn( - "LoadBalancingPolicyPolicy.config is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class UpstreamBindConfig(betterproto.Message): - """ - An extensible structure containing the address Envoy should bind to when - establishing upstream connections. - """ - - # The address Envoy should bind to when establishing upstream connections. - source_address: "core.Address" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class UpstreamConnectionOptions(betterproto.Message): - # If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. - tcp_keepalive: "core.TcpKeepalive" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class Listener(betterproto.Message): - """[#next-free-field: 23]""" - - # The unique name by which this listener is known. If no name is provided, - # Envoy will allocate an internal UUID for the listener. If the listener is - # to be dynamically updated or removed via :ref:`LDS ` - # a unique name must be provided. - name: str = betterproto.string_field(1) - # The address that the listener should listen on. In general, the address - # must be unique, though that is governed by the bind rules of the OS. E.g., - # multiple listeners can listen on port 0 on Linux as the actual port will be - # allocated by the OS. - address: "core.Address" = betterproto.message_field(2) - # A list of filter chains to consider for this listener. The - # :ref:`FilterChain ` with the most - # specific :ref:`FilterChainMatch ` - # criteria is used on a connection. Example using SNI for filter chain - # selection can be found in the :ref:`FAQ entry `. - filter_chains: List["listener.FilterChain"] = betterproto.message_field(3) - # If a connection is redirected using *iptables*, the port on which the proxy - # receives it might be different from the original destination address. When - # this flag is set to true, the listener hands off redirected connections to - # the listener associated with the original destination address. 
If there is - # no listener associated with the original destination address, the - # connection is handled by the listener that receives it. Defaults to false. - # .. attention:: This field is deprecated. Use :ref:`an original_dst - # ` :ref:`listener filter - # ` instead. Note that hand off - # to another listener is *NOT* performed without this flag. Once - # :ref:`FilterChainMatch ` is - # implemented this flag will be removed, as filter chain matching can be - # used to select a filter chain based on the restored destination address. - use_original_dst: Optional[bool] = betterproto.message_field( - 4, wraps=betterproto.TYPE_BOOL - ) - # Soft limit on size of the listener’s new connection read and write buffers. - # If unspecified, an implementation defined default is applied (1MiB). - per_connection_buffer_limit_bytes: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) - # Listener metadata. - metadata: "core.Metadata" = betterproto.message_field(6) - # [#not-implemented-hide:] - deprecated_v1: "ListenerDeprecatedV1" = betterproto.message_field(7) - # The type of draining to perform at a listener-wide level. - drain_type: "ListenerDrainType" = betterproto.enum_field(8) - # Listener filters have the opportunity to manipulate and augment the - # connection metadata that is used in connection filter chain matching, for - # example. These filters are run before any in :ref:`filter_chains - # `. Order matters as the filters are - # processed sequentially right after a socket has been accepted by the - # listener, and before a connection is created. UDP Listener filters can be - # specified when the protocol in the listener socket address in - # :ref:`protocol ` is :ref:`UDP - # `. UDP listeners - # currently support a single filter. - listener_filters: List["listener.ListenerFilter"] = betterproto.message_field(9) - # The timeout to wait for all listener filters to complete operation. If the - # timeout is reached, the accepted socket is closed without a connection - # being created unless `continue_on_listener_filters_timeout` is set to true. - # Specify 0 to disable the timeout. If not specified, a default timeout of - # 15s is used. - listener_filters_timeout: timedelta = betterproto.message_field(15) - # Whether a connection should be created when listener filters timeout. - # Default is false. .. attention:: Some listener filters, such as - # :ref:`Proxy Protocol filter `, - # should not be used with this option. It will cause unexpected behavior - # when a connection is created. - continue_on_listener_filters_timeout: bool = betterproto.bool_field(17) - # Whether the listener should be set as a transparent socket. When this flag - # is set to true, connections can be redirected to the listener using an - # *iptables* *TPROXY* target, in which case the original source and - # destination addresses and ports are preserved on accepted connections. This - # flag should be used in combination with :ref:`an original_dst - # ` :ref:`listener filter - # ` to mark the connections' local - # addresses as "restored." This can be used to hand off each redirected - # connection to another listener associated with the connection's destination - # address. Direct connections to the socket without using *TPROXY* cannot be - # distinguished from connections redirected using *TPROXY* and are therefore - # treated as if they were redirected. When this flag is set to false, the - # listener's socket is explicitly reset as non-transparent. 
Setting this flag - # requires Envoy to run with the *CAP_NET_ADMIN* capability. When this flag - # is not set (default), the socket is not modified, i.e. the transparent - # option is neither set nor reset. - transparent: Optional[bool] = betterproto.message_field( - 10, wraps=betterproto.TYPE_BOOL - ) - # Whether the listener should set the *IP_FREEBIND* socket option. When this - # flag is set to true, listeners can be bound to an IP address that is not - # configured on the system running Envoy. When this flag is set to false, the - # option *IP_FREEBIND* is disabled on the socket. When this flag is not set - # (default), the socket is not modified, i.e. the option is neither enabled - # nor disabled. - freebind: Optional[bool] = betterproto.message_field( - 11, wraps=betterproto.TYPE_BOOL - ) - # Additional socket options that may not be present in Envoy source code or - # precompiled binaries. - socket_options: List["core.SocketOption"] = betterproto.message_field(13) - # Whether the listener should accept TCP Fast Open (TFO) connections. When - # this flag is set to a value greater than 0, the option TCP_FASTOPEN is - # enabled on the socket, with a queue length of the specified size (see - # `details in RFC7413 `_). - # When this flag is set to 0, the option TCP_FASTOPEN is disabled on the - # socket. When this flag is not set (default), the socket is not modified, - # i.e. the option is neither enabled nor disabled. On Linux, the - # net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable - # TCP_FASTOPEN. See `ip-sysctl.txt - # `_. On - # macOS, only values of 0, 1, and unset are valid; other values may result in - # an error. To set the queue length on macOS, set the - # net.inet.tcp.fastopen_backlog kernel parameter. - tcp_fast_open_queue_length: Optional[int] = betterproto.message_field( - 12, wraps=betterproto.TYPE_UINT32 - ) - # Specifies the intended direction of the traffic relative to the local - # Envoy. This property is required on Windows for listeners using the - # original destination filter, see :ref:`Original Destination - # `. - traffic_direction: "core.TrafficDirection" = betterproto.enum_field(16) - # If the protocol in the listener socket address in :ref:`protocol - # ` is :ref:`UDP - # `, this field - # specifies the actual udp listener to create, i.e. :ref:`udp_listener_name - # ` = - # "raw_udp_listener" for creating a packet-oriented UDP listener. If not - # present, treat it as "raw_udp_listener". - udp_listener_config: "listener.UdpListenerConfig" = betterproto.message_field(18) - # Used to represent an API listener, which is used in non-proxy clients. The - # type of API exposed to the non-proxy application depends on the type of API - # listener. When this field is set, no other field except for - # :ref:`name` should be set. .. note:: - # Currently only one ApiListener can be installed; and it can only be done - # via bootstrap config, not LDS. [#next-major-version: In the v3 API, - # instead of this messy approach where the socket listener fields are - # directly in the top-level Listener message and the API listener types are - # in the ApiListener message, the socket listener messages should be in their - # own message, and the top-level Listener should essentially be a oneof that - # selects between the socket listener and the various types of API listener. - # That way, a given Listener message can structurally only contain the fields - # of the relevant type.] 
- api_listener: "__config_listener_v2__.ApiListener" = betterproto.message_field(19) - # The listener's connection balancer configuration, currently only applicable - # to TCP listeners. If no configuration is specified, Envoy will not attempt - # to balance active connections between worker threads. - connection_balance_config: "ListenerConnectionBalanceConfig" = ( - betterproto.message_field(20) - ) - # When this flag is set to true, listeners set the *SO_REUSEPORT* socket - # option and create one socket for each worker thread. This makes inbound - # connections distribute among worker threads roughly evenly in cases where - # there are a high number of connections. When this flag is set to false, all - # worker threads share one socket. Before Linux v4.19-rc1, new TCP - # connections may be rejected during hot restart (see `3rd paragraph in - # 'soreuseport' commit message - # `_). - # This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT - # socket `_. - reuse_port: bool = betterproto.bool_field(21) - # Configuration for :ref:`access logs ` emitted by - # this listener. - access_log: List[ - "__config_filter_accesslog_v2__.AccessLog" - ] = betterproto.message_field(22) - - def __post_init__(self) -> None: - super().__post_init__() - if self.use_original_dst: - warnings.warn("Listener.use_original_dst is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class ListenerDeprecatedV1(betterproto.Message): - """[#not-implemented-hide:]""" - - # Whether the listener should bind to the port. A listener that doesn't bind - # can only receive connections redirected from other listeners that set - # use_original_dst parameter to true. Default is true. This is deprecated in - # v2, all Listeners will bind to their port. An additional filter chain must - # be created for every original destination port this listener may redirect - # to in v2, with the original port specified in the FilterChainMatch - # destination_port field. [#comment:TODO(PiotrSikora): Remove this once - # verified that we no longer need it.] - bind_to_port: Optional[bool] = betterproto.message_field( - 1, wraps=betterproto.TYPE_BOOL - ) - - -@dataclass(eq=False, repr=False) -class ListenerConnectionBalanceConfig(betterproto.Message): - """Configuration for listener connection balancing.""" - - # If specified, the listener will use the exact connection balancer. - exact_balance: "ListenerConnectionBalanceConfigExactBalance" = ( - betterproto.message_field(1, group="balance_type") - ) - - -@dataclass(eq=False, repr=False) -class ListenerConnectionBalanceConfigExactBalance(betterproto.Message): - """ - A connection balancer implementation that does exact balancing. This means - that a lock is held during balancing so that connection counts are nearly - exactly balanced between worker threads. This is "nearly" exact in the - sense that a connection might close in parallel thus making the counts - incorrect, but this should be rectified on the next accept. This balancer - sacrifices accept throughput for accuracy and should be used when there are - a small number of connections that rarely cycle (e.g., service mesh gRPC - egress). - """ - - pass - - -@dataclass(eq=False, repr=False) -class RouteConfiguration(betterproto.Message): - """[#next-free-field: 11]""" - - # The name of the route configuration. For example, it might match - # :ref:`route_config_name ` in - # :ref:`envoy_api_msg_config.filter.network.http_connection_manager.v2.Rds`. 
- name: str = betterproto.string_field(1) - # An array of virtual hosts that make up the route table. - virtual_hosts: List["route.VirtualHost"] = betterproto.message_field(2) - # An array of virtual hosts will be dynamically loaded via the VHDS API. Both - # *virtual_hosts* and *vhds* fields will be used when present. - # *virtual_hosts* can be used for a base routing table or for infrequently - # changing virtual hosts. *vhds* is used for on-demand discovery of virtual - # hosts. The contents of these two fields will be merged to generate a - # routing table for a given RouteConfiguration, with *vhds* derived - # configuration taking precedence. - vhds: "Vhds" = betterproto.message_field(9) - # Optionally specifies a list of HTTP headers that the connection manager - # will consider to be internal only. If they are found on external requests - # they will be cleaned prior to filter invocation. See - # :ref:`config_http_conn_man_headers_x-envoy-internal` for more information. - internal_only_headers: List[str] = betterproto.string_field(3) - # Specifies a list of HTTP headers that should be added to each response that - # the connection manager encodes. Headers specified at this level are applied - # after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or - # :ref:`envoy_api_msg_route.RouteAction`. For more information, including - # details on header value syntax, see the documentation on :ref:`custom - # request headers `. - response_headers_to_add: List["core.HeaderValueOption"] = betterproto.message_field( - 4 - ) - # Specifies a list of HTTP headers that should be removed from each response - # that the connection manager encodes. - response_headers_to_remove: List[str] = betterproto.string_field(5) - # Specifies a list of HTTP headers that should be added to each request - # routed by the HTTP connection manager. Headers specified at this level are - # applied after headers from any enclosed - # :ref:`envoy_api_msg_route.VirtualHost` or - # :ref:`envoy_api_msg_route.RouteAction`. For more information, including - # details on header value syntax, see the documentation on :ref:`custom - # request headers `. - request_headers_to_add: List["core.HeaderValueOption"] = betterproto.message_field( - 6 - ) - # Specifies a list of HTTP headers that should be removed from each request - # routed by the HTTP connection manager. - request_headers_to_remove: List[str] = betterproto.string_field(8) - # By default, headers that should be added/removed are evaluated from most to - # least specific: * route level * virtual host level * connection manager - # level To allow setting overrides at the route or virtual host level, this - # order can be reversed by setting this option to true. Defaults to false. - # [#next-major-version: In the v3 API, this will default to true.] - most_specific_header_mutations_wins: bool = betterproto.bool_field(10) - # An optional boolean that specifies whether the clusters that the route - # table refers to will be validated by the cluster manager. If set to true - # and a route refers to a non-existent cluster, the route table will not - # load. If set to false and a route refers to a non-existent cluster, the - # route table will load and the router filter will return a 404 if the route - # is selected at runtime. This setting defaults to true if the route table is - # statically defined via the :ref:`route_config ` - # option. This setting default to false if the route table is loaded - # dynamically via the :ref:`rds ` option. 
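A RouteConfiguration built from these dataclasses mirrors the YAML form used in Envoy's documentation. A small sketch under the same module-layout assumption as above; the route, cluster and header names are placeholders:

    from envoy_data_plane.envoy.api.v2 import RouteConfiguration
    from envoy_data_plane.envoy.api.v2 import core, route

    route_config = RouteConfiguration(
        name="local_route",
        virtual_hosts=[
            route.VirtualHost(
                name="backend",
                domains=["*"],
                routes=[
                    route.Route(
                        match=route.RouteMatch(prefix="/"),
                        route=route.RouteAction(cluster="service_backend"),
                    )
                ],
            )
        ],
        # Applied after any VirtualHost/RouteAction level headers, as described above.
        response_headers_to_add=[
            core.HeaderValueOption(
                header=core.HeaderValue(key="x-served-by", value="envoy")
            )
        ],
        # Allow routes that reference clusters delivered later (e.g. via CDS).
        validate_clusters=False,
    )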
Users may wish to - # override the default behavior in certain cases (for example when using CDS - # with a static route table). - validate_clusters: Optional[bool] = betterproto.message_field( - 7, wraps=betterproto.TYPE_BOOL - ) - - -@dataclass(eq=False, repr=False) -class Vhds(betterproto.Message): - # Configuration source specifier for VHDS. - config_source: "core.ConfigSource" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ScopedRouteConfiguration(betterproto.Message): - """ - Specifies a routing scope, which associates a - :ref:`Key` to a - :ref:`envoy_api_msg_RouteConfiguration` (identified by its resource name). - The HTTP connection manager builds up a table consisting of these Key to - RouteConfiguration mappings, and looks up the RouteConfiguration to use per - request according to the algorithm specified in the :ref:`scope_key_builder - ` assigned to the HttpConnectionManager. For example, - with the following configurations (in YAML): HttpConnectionManager config: - .. code:: ... scoped_routes: name: foo-scoped-routes - scope_key_builder: fragments: - header_value_extractor: - name: X-Route-Selector element_separator: , - element: separator: = key: vip - ScopedRouteConfiguration resources (specified statically via :ref:`scoped_r - oute_configurations_list` or obtained - dynamically via SRDS): .. code:: (1) name: route-scope1 - route_configuration_name: route-config1 key: fragments: - - string_key: 172.10.10.20 (2) name: route-scope2 - route_configuration_name: route-config2 key: fragments: - - string_key: 172.20.20.30 A request from a client such as: .. code:: GET - / HTTP/1.1 Host: foo.com X-Route-Selector: vip=172.10.10.20 would - result in the routing table defined by the `route-config1` - RouteConfiguration being assigned to the HTTP request/stream. - """ - - # The name assigned to the routing scope. - name: str = betterproto.string_field(1) - # The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an - # RDS server to fetch the :ref:`envoy_api_msg_RouteConfiguration` associated - # with this scope. - route_configuration_name: str = betterproto.string_field(2) - # The key to match against. - key: "ScopedRouteConfigurationKey" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ScopedRouteConfigurationKey(betterproto.Message): - """ - Specifies a key which is matched against the output of the :ref:`scope_key_ - builder` specified in the HttpConnectionManager. The - matching is done per HTTP request and is dependent on the order of the - fragments contained in the Key. - """ - - # The ordered set of fragments to match against. The order must match the - # fragments in the corresponding :ref:`scope_key_builder`. - fragments: List["ScopedRouteConfigurationKeyFragment"] = betterproto.message_field( - 1 - ) - - -@dataclass(eq=False, repr=False) -class ScopedRouteConfigurationKeyFragment(betterproto.Message): - # A string to match against. - string_key: str = betterproto.string_field(1, group="type") - - -@dataclass(eq=False, repr=False) -class DiscoveryRequest(betterproto.Message): - """ - A DiscoveryRequest requests a set of versioned resources of the same type - for a given Envoy node on some API. [#next-free-field: 7] - """ - - # The version_info provided in the request messages will be the version_info - # received with the most recent successfully processed response or empty on - # the first request. 
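The worked scope-key example in the ScopedRouteConfiguration docstring above can also be expressed with the generated dataclasses. A sketch of the route-scope1 resource, under the same module-layout assumption:

    from envoy_data_plane.envoy.api.v2 import (
        ScopedRouteConfiguration,
        ScopedRouteConfigurationKey,
        ScopedRouteConfigurationKeyFragment,
    )

    scope = ScopedRouteConfiguration(
        name="route-scope1",
        route_configuration_name="route-config1",
        key=ScopedRouteConfigurationKey(
            fragments=[
                # Matched against the vip fragment extracted from X-Route-Selector.
                ScopedRouteConfigurationKeyFragment(string_key="172.10.10.20")
            ]
        ),
    )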
It is expected that no new request is sent after a - # response is received until the Envoy instance is ready to ACK/NACK the new - # configuration. ACK/NACK takes place by returning the new API config version - # as applied or the previous API config version respectively. Each type_url - # (see below) has an independent version associated with it. - version_info: str = betterproto.string_field(1) - # The node making the request. - node: "core.Node" = betterproto.message_field(2) - # List of resources to subscribe to, e.g. list of cluster names or a route - # configuration name. If this is empty, all resources for the API are - # returned. LDS/CDS may have empty resource_names, which will cause all - # resources for the Envoy instance to be returned. The LDS and CDS responses - # will then imply a number of resources that need to be fetched via EDS/RDS, - # which will be explicitly enumerated in resource_names. - resource_names: List[str] = betterproto.string_field(3) - # Type of the resource that is being requested, e.g. - # "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit - # in requests made via singleton xDS APIs such as CDS, LDS, etc. but is - # required for ADS. - type_url: str = betterproto.string_field(4) - # nonce corresponding to DiscoveryResponse being ACK/NACKed. See above - # discussion on version_info and the DiscoveryResponse nonce comment. This - # may be empty only if 1) this is a non-persistent-stream xDS such as HTTP, - # or 2) the client has not yet accepted an update in this xDS stream (unlike - # delta, where it is populated only for new explicit ACKs). - response_nonce: str = betterproto.string_field(5) - # This is populated when the previous :ref:`DiscoveryResponse - # ` failed to update configuration. The - # *message* field in *error_details* provides the Envoy internal exception - # related to the failure. It is only intended for consumption during manual - # debugging, the string provided is not guaranteed to be stable across Envoy - # versions. - error_detail: "___google_rpc__.Status" = betterproto.message_field(6) - - -@dataclass(eq=False, repr=False) -class DiscoveryResponse(betterproto.Message): - """[#next-free-field: 7]""" - - # The version of the response data. - version_info: str = betterproto.string_field(1) - # The response resources. These resources are typed and depend on the API - # being called. - resources: List["betterproto_lib_google_protobuf.Any"] = betterproto.message_field( - 2 - ) - # [#not-implemented-hide:] Canary is used to support two Envoy command line - # flags: * --terminate-on-canary-transition-failure. When set, Envoy is able - # to terminate if it detects that configuration is stuck at canary. - # Consider this example sequence of updates: - Management server applies - # a canary config successfully. - Management server rolls back to a - # production config. - Envoy rejects the new production config. Since - # there is no sensible way to continue receiving configuration updates, - # Envoy will then terminate and apply production config from a clean slate. - # * --dry-run-canary. When set, a canary response will never be applied, only - # validated via a dry run. - canary: bool = betterproto.bool_field(3) - # Type URL for resources. Identifies the xDS API when muxing over ADS. Must - # be consistent with the type_url in the 'resources' repeated Any (if non- - # empty). 
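The ACK/NACK handshake described above maps directly onto these two messages: after applying a DiscoveryResponse, the client sends a DiscoveryRequest echoing the applied version_info together with the response nonce, while a NACK keeps the previously applied version_info and attaches error_detail. A sketch for a CDS subscription; the node id, cluster name and nonce are placeholders:

    from envoy_data_plane.envoy.api.v2 import DiscoveryRequest
    from envoy_data_plane.envoy.api.v2 import core

    ack = DiscoveryRequest(
        node=core.Node(id="sidecar-1", cluster="demo"),
        # Version of the configuration that was just applied successfully.
        version_info="5",
        # Empty means "all clusters"; names here narrow the subscription.
        resource_names=["service_backend"],
        type_url="type.googleapis.com/envoy.api.v2.Cluster",
        # Nonce copied from the DiscoveryResponse being acknowledged.
        response_nonce="nonce-from-last-response",
    )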
- type_url: str = betterproto.string_field(4) - # For gRPC based subscriptions, the nonce provides a way to explicitly ack a - # specific DiscoveryResponse in a following DiscoveryRequest. Additional - # messages may have been sent by Envoy to the management server for the - # previous version on the stream prior to this DiscoveryResponse, that were - # unprocessed at response send time. The nonce allows the management server - # to ignore any further DiscoveryRequests for the previous version until a - # DiscoveryRequest bearing the nonce. The nonce is optional and is not - # required for non-stream based xDS implementations. - nonce: str = betterproto.string_field(5) - # [#not-implemented-hide:] The control plane instance that sent the response. - control_plane: "core.ControlPlane" = betterproto.message_field(6) - - -@dataclass(eq=False, repr=False) -class DeltaDiscoveryRequest(betterproto.Message): - """ - DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC - endpoint for Delta xDS. With Delta xDS, the DeltaDiscoveryResponses do not - need to include a full snapshot of the tracked resources. Instead, - DeltaDiscoveryResponses are a diff to the state of a xDS client. In Delta - XDS there are per-resource versions, which allow tracking state at the - resource granularity. An xDS Delta session is always in the context of a - gRPC bidirectional stream. This allows the xDS server to keep track of the - state of xDS clients connected to it. In Delta xDS the nonce field is - required and used to pair DeltaDiscoveryResponse to a DeltaDiscoveryRequest - ACK or NACK. Optionally, a response message level system_version_info is - present for debugging purposes only. DeltaDiscoveryRequest plays two - independent roles. Any DeltaDiscoveryRequest can be either or both of: [1] - informing the server of what resources the client has gained/lost interest - in (using resource_names_subscribe and resource_names_unsubscribe), or [2] - (N)ACKing an earlier resource update from the server (using response_nonce, - with presence of error_detail making it a NACK). Additionally, the first - message (for a given type_url) of a reconnected gRPC stream has a third - role: informing the server of the resources (and their versions) that the - client already possesses, using the initial_resource_versions field. As - with state-of-the-world, when multiple resource types are multiplexed - (ADS), all requests/acknowledgments/updates are logically walled off by - type_url: a Cluster ACK exists in a completely separate world from a prior - Route NACK. In particular, initial_resource_versions being sent at the - "start" of every gRPC stream actually entails a message for each type_url, - each with its own initial_resource_versions. [#next-free-field: 8] - """ - - # The node making the request. - node: "core.Node" = betterproto.message_field(1) - # Type of the resource that is being requested, e.g. - # "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". - type_url: str = betterproto.string_field(2) - # DeltaDiscoveryRequests allow the client to add or remove individual - # resources to the set of tracked resources in the context of a stream. All - # resource names in the resource_names_subscribe list are added to the set of - # tracked resources and all resource names in the resource_names_unsubscribe - # list are removed from the set of tracked resources. 
*Unlike* state-of-the- - # world xDS, an empty resource_names_subscribe or resource_names_unsubscribe - # list simply means that no resources are to be added or removed to the - # resource list. *Like* state-of-the-world xDS, the server must send updates - # for all tracked resources, but can also send updates for resources the - # client has not subscribed to. NOTE: the server must respond with all - # resources listed in resource_names_subscribe, even if it believes the - # client has the most recent version of them. The reason: the client may have - # dropped them, but then regained interest before it had a chance to send the - # unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. These - # two fields can be set in any DeltaDiscoveryRequest, including ACKs and - # initial_resource_versions. A list of Resource names to add to the list of - # tracked resources. - resource_names_subscribe: List[str] = betterproto.string_field(3) - # A list of Resource names to remove from the list of tracked resources. - resource_names_unsubscribe: List[str] = betterproto.string_field(4) - # Informs the server of the versions of the resources the xDS client knows - # of, to enable the client to continue the same logical xDS session even in - # the face of gRPC stream reconnection. It will not be populated: [1] in the - # very first stream of a session, since the client will not yet have any - # resources, [2] in any message after the first in a stream (for a given - # type_url), since the server will already be correctly tracking the client's - # state. (In ADS, the first message *of each type_url* of a reconnected - # stream populates this map.) The map's keys are names of xDS resources known - # to the xDS client. The map's values are opaque resource versions. - initial_resource_versions: Dict[str, str] = betterproto.map_field( - 5, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - # When the DeltaDiscoveryRequest is a ACK or NACK message in response to a - # previous DeltaDiscoveryResponse, the response_nonce must be the nonce in - # the DeltaDiscoveryResponse. Otherwise (unlike in DiscoveryRequest) - # response_nonce must be omitted. - response_nonce: str = betterproto.string_field(6) - # This is populated when the previous :ref:`DiscoveryResponse - # ` failed to update configuration. The - # *message* field in *error_details* provides the Envoy internal exception - # related to the failure. - error_detail: "___google_rpc__.Status" = betterproto.message_field(7) - - -@dataclass(eq=False, repr=False) -class DeltaDiscoveryResponse(betterproto.Message): - """[#next-free-field: 7]""" - - # The version of the response data (used for debugging). - system_version_info: str = betterproto.string_field(1) - # The response resources. These are typed resources, whose types must match - # the type_url field. - resources: List["Resource"] = betterproto.message_field(2) - # Type URL for resources. Identifies the xDS API when muxing over ADS. Must - # be consistent with the type_url in the Any within 'resources' if - # 'resources' is non-empty. - type_url: str = betterproto.string_field(4) - # Resources names of resources that have be deleted and to be removed from - # the xDS Client. Removed resources for missing resources can be ignored. - removed_resources: List[str] = betterproto.string_field(6) - # The nonce provides a way for DeltaDiscoveryRequests to uniquely reference a - # DeltaDiscoveryResponse when (N)ACKing. The nonce is required. 
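Putting the delta semantics above together: the subscribe/unsubscribe lists change the tracked resource set, while response_nonce (N)ACKs the previous DeltaDiscoveryResponse. A sketch; the type_url and resource names are placeholders:

    from envoy_data_plane.envoy.api.v2 import DeltaDiscoveryRequest

    delta_req = DeltaDiscoveryRequest(
        type_url="type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
        # Add one assignment to the tracked set, drop another.
        resource_names_subscribe=["service_backend"],
        resource_names_unsubscribe=["retired_backend"],
        # Only populated on the first message (per type_url) of a reconnected stream.
        initial_resource_versions={},
        # ACK of the previous DeltaDiscoveryResponse; omit when not (N)ACKing.
        response_nonce="nonce-42",
    )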
- nonce: str = betterproto.string_field(5) - - -@dataclass(eq=False, repr=False) -class Resource(betterproto.Message): - # The resource's name, to distinguish it from others of the same type of - # resource. - name: str = betterproto.string_field(3) - # The aliases are a list of other names that this resource can go by. - aliases: List[str] = betterproto.string_field(4) - # The resource level version. It allows xDS to track the state of individual - # resources. - version: str = betterproto.string_field(1) - # The resource being tracked. - resource: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class SrdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 and - protoxform to upgrade the file. - """ - - pass - - -@dataclass(eq=False, repr=False) -class CdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 and - protoxform to upgrade the file. - """ - - pass - - -@dataclass(eq=False, repr=False) -class RdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 and - protoxform to upgrade the file. - """ - - pass - - -@dataclass(eq=False, repr=False) -class EdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 and - protoxform to upgrade the file. - """ - - pass - - -@dataclass(eq=False, repr=False) -class LdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 and - protoxform to upgrade the file. 
- """ - - pass - - -class ScopedRoutesDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_scoped_routes( - self, - request_iterator: Union[ - AsyncIterable["DiscoveryRequest"], Iterable["DiscoveryRequest"] - ], - ) -> AsyncIterator["DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.api.v2.ScopedRoutesDiscoveryService/StreamScopedRoutes", - request_iterator, - DiscoveryRequest, - DiscoveryResponse, - ): - yield response - - async def delta_scoped_routes( - self, - request_iterator: Union[ - AsyncIterable["DeltaDiscoveryRequest"], Iterable["DeltaDiscoveryRequest"] - ], - ) -> AsyncIterator["DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.api.v2.ScopedRoutesDiscoveryService/DeltaScopedRoutes", - request_iterator, - DeltaDiscoveryRequest, - DeltaDiscoveryResponse, - ): - yield response - - async def fetch_scoped_routes( - self, - *, - version_info: str = "", - node: "core.Node" = None, - resource_names: Optional[List[str]] = None, - type_url: str = "", - response_nonce: str = "", - error_detail: "___google_rpc__.Status" = None - ) -> "DiscoveryResponse": - resource_names = resource_names or [] - - request = DiscoveryRequest() - request.version_info = version_info - if node is not None: - request.node = node - request.resource_names = resource_names - request.type_url = type_url - request.response_nonce = response_nonce - if error_detail is not None: - request.error_detail = error_detail - - return await self._unary_unary( - "/envoy.api.v2.ScopedRoutesDiscoveryService/FetchScopedRoutes", - request, - DiscoveryResponse, - ) - - -class ClusterDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_clusters( - self, - request_iterator: Union[ - AsyncIterable["DiscoveryRequest"], Iterable["DiscoveryRequest"] - ], - ) -> AsyncIterator["DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.api.v2.ClusterDiscoveryService/StreamClusters", - request_iterator, - DiscoveryRequest, - DiscoveryResponse, - ): - yield response - - async def delta_clusters( - self, - request_iterator: Union[ - AsyncIterable["DeltaDiscoveryRequest"], Iterable["DeltaDiscoveryRequest"] - ], - ) -> AsyncIterator["DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.api.v2.ClusterDiscoveryService/DeltaClusters", - request_iterator, - DeltaDiscoveryRequest, - DeltaDiscoveryResponse, - ): - yield response - - async def fetch_clusters( - self, - *, - version_info: str = "", - node: "core.Node" = None, - resource_names: Optional[List[str]] = None, - type_url: str = "", - response_nonce: str = "", - error_detail: "___google_rpc__.Status" = None - ) -> "DiscoveryResponse": - resource_names = resource_names or [] - - request = DiscoveryRequest() - request.version_info = version_info - if node is not None: - request.node = node - request.resource_names = resource_names - request.type_url = type_url - request.response_nonce = response_nonce - if error_detail is not None: - request.error_detail = error_detail - - return await self._unary_unary( - "/envoy.api.v2.ClusterDiscoveryService/FetchClusters", - request, - DiscoveryResponse, - ) - - -class RouteDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_routes( - self, - request_iterator: Union[ - AsyncIterable["DiscoveryRequest"], Iterable["DiscoveryRequest"] - ], - ) -> AsyncIterator["DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.api.v2.RouteDiscoveryService/StreamRoutes", - request_iterator, - 
DiscoveryRequest, - DiscoveryResponse, - ): - yield response - - async def delta_routes( - self, - request_iterator: Union[ - AsyncIterable["DeltaDiscoveryRequest"], Iterable["DeltaDiscoveryRequest"] - ], - ) -> AsyncIterator["DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.api.v2.RouteDiscoveryService/DeltaRoutes", - request_iterator, - DeltaDiscoveryRequest, - DeltaDiscoveryResponse, - ): - yield response - - async def fetch_routes( - self, - *, - version_info: str = "", - node: "core.Node" = None, - resource_names: Optional[List[str]] = None, - type_url: str = "", - response_nonce: str = "", - error_detail: "___google_rpc__.Status" = None - ) -> "DiscoveryResponse": - resource_names = resource_names or [] - - request = DiscoveryRequest() - request.version_info = version_info - if node is not None: - request.node = node - request.resource_names = resource_names - request.type_url = type_url - request.response_nonce = response_nonce - if error_detail is not None: - request.error_detail = error_detail - - return await self._unary_unary( - "/envoy.api.v2.RouteDiscoveryService/FetchRoutes", - request, - DiscoveryResponse, - ) - - -class VirtualHostDiscoveryServiceStub(betterproto.ServiceStub): - async def delta_virtual_hosts( - self, - request_iterator: Union[ - AsyncIterable["DeltaDiscoveryRequest"], Iterable["DeltaDiscoveryRequest"] - ], - ) -> AsyncIterator["DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.api.v2.VirtualHostDiscoveryService/DeltaVirtualHosts", - request_iterator, - DeltaDiscoveryRequest, - DeltaDiscoveryResponse, - ): - yield response - - -class EndpointDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_endpoints( - self, - request_iterator: Union[ - AsyncIterable["DiscoveryRequest"], Iterable["DiscoveryRequest"] - ], - ) -> AsyncIterator["DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.api.v2.EndpointDiscoveryService/StreamEndpoints", - request_iterator, - DiscoveryRequest, - DiscoveryResponse, - ): - yield response - - async def delta_endpoints( - self, - request_iterator: Union[ - AsyncIterable["DeltaDiscoveryRequest"], Iterable["DeltaDiscoveryRequest"] - ], - ) -> AsyncIterator["DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.api.v2.EndpointDiscoveryService/DeltaEndpoints", - request_iterator, - DeltaDiscoveryRequest, - DeltaDiscoveryResponse, - ): - yield response - - async def fetch_endpoints( - self, - *, - version_info: str = "", - node: "core.Node" = None, - resource_names: Optional[List[str]] = None, - type_url: str = "", - response_nonce: str = "", - error_detail: "___google_rpc__.Status" = None - ) -> "DiscoveryResponse": - resource_names = resource_names or [] - - request = DiscoveryRequest() - request.version_info = version_info - if node is not None: - request.node = node - request.resource_names = resource_names - request.type_url = type_url - request.response_nonce = response_nonce - if error_detail is not None: - request.error_detail = error_detail - - return await self._unary_unary( - "/envoy.api.v2.EndpointDiscoveryService/FetchEndpoints", - request, - DiscoveryResponse, - ) - - -class ListenerDiscoveryServiceStub(betterproto.ServiceStub): - async def delta_listeners( - self, - request_iterator: Union[ - AsyncIterable["DeltaDiscoveryRequest"], Iterable["DeltaDiscoveryRequest"] - ], - ) -> AsyncIterator["DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - 
"/envoy.api.v2.ListenerDiscoveryService/DeltaListeners", - request_iterator, - DeltaDiscoveryRequest, - DeltaDiscoveryResponse, - ): - yield response - - async def stream_listeners( - self, - request_iterator: Union[ - AsyncIterable["DiscoveryRequest"], Iterable["DiscoveryRequest"] - ], - ) -> AsyncIterator["DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.api.v2.ListenerDiscoveryService/StreamListeners", - request_iterator, - DiscoveryRequest, - DiscoveryResponse, - ): - yield response - - async def fetch_listeners( - self, - *, - version_info: str = "", - node: "core.Node" = None, - resource_names: Optional[List[str]] = None, - type_url: str = "", - response_nonce: str = "", - error_detail: "___google_rpc__.Status" = None - ) -> "DiscoveryResponse": - resource_names = resource_names or [] - - request = DiscoveryRequest() - request.version_info = version_info - if node is not None: - request.node = node - request.resource_names = resource_names - request.type_url = type_url - request.response_nonce = response_nonce - if error_detail is not None: - request.error_detail = error_detail - - return await self._unary_unary( - "/envoy.api.v2.ListenerDiscoveryService/FetchListeners", - request, - DiscoveryResponse, - ) - - -class ScopedRoutesDiscoveryServiceBase(ServiceBase): - async def stream_scoped_routes( - self, request_iterator: AsyncIterator["DiscoveryRequest"] - ) -> AsyncIterator["DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def delta_scoped_routes( - self, request_iterator: AsyncIterator["DeltaDiscoveryRequest"] - ) -> AsyncIterator["DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_scoped_routes( - self, - version_info: str, - node: "core.Node", - resource_names: Optional[List[str]], - type_url: str, - response_nonce: str, - error_detail: "___google_rpc__.Status", - ) -> "DiscoveryResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_scoped_routes(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_scoped_routes, - stream, - request_kwargs, - ) - - async def __rpc_delta_scoped_routes(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_scoped_routes, - stream, - request_kwargs, - ) - - async def __rpc_fetch_scoped_routes(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "version_info": request.version_info, - "node": request.node, - "resource_names": request.resource_names, - "type_url": request.type_url, - "response_nonce": request.response_nonce, - "error_detail": request.error_detail, - } - - response = await self.fetch_scoped_routes(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.api.v2.ScopedRoutesDiscoveryService/StreamScopedRoutes": grpclib.const.Handler( - self.__rpc_stream_scoped_routes, - grpclib.const.Cardinality.STREAM_STREAM, - DiscoveryRequest, - DiscoveryResponse, - ), - "/envoy.api.v2.ScopedRoutesDiscoveryService/DeltaScopedRoutes": grpclib.const.Handler( - self.__rpc_delta_scoped_routes, - grpclib.const.Cardinality.STREAM_STREAM, - DeltaDiscoveryRequest, - DeltaDiscoveryResponse, - ), - 
"/envoy.api.v2.ScopedRoutesDiscoveryService/FetchScopedRoutes": grpclib.const.Handler( - self.__rpc_fetch_scoped_routes, - grpclib.const.Cardinality.UNARY_UNARY, - DiscoveryRequest, - DiscoveryResponse, - ), - } - - -class ClusterDiscoveryServiceBase(ServiceBase): - async def stream_clusters( - self, request_iterator: AsyncIterator["DiscoveryRequest"] - ) -> AsyncIterator["DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def delta_clusters( - self, request_iterator: AsyncIterator["DeltaDiscoveryRequest"] - ) -> AsyncIterator["DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_clusters( - self, - version_info: str, - node: "core.Node", - resource_names: Optional[List[str]], - type_url: str, - response_nonce: str, - error_detail: "___google_rpc__.Status", - ) -> "DiscoveryResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_clusters(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_clusters, - stream, - request_kwargs, - ) - - async def __rpc_delta_clusters(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_clusters, - stream, - request_kwargs, - ) - - async def __rpc_fetch_clusters(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "version_info": request.version_info, - "node": request.node, - "resource_names": request.resource_names, - "type_url": request.type_url, - "response_nonce": request.response_nonce, - "error_detail": request.error_detail, - } - - response = await self.fetch_clusters(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.api.v2.ClusterDiscoveryService/StreamClusters": grpclib.const.Handler( - self.__rpc_stream_clusters, - grpclib.const.Cardinality.STREAM_STREAM, - DiscoveryRequest, - DiscoveryResponse, - ), - "/envoy.api.v2.ClusterDiscoveryService/DeltaClusters": grpclib.const.Handler( - self.__rpc_delta_clusters, - grpclib.const.Cardinality.STREAM_STREAM, - DeltaDiscoveryRequest, - DeltaDiscoveryResponse, - ), - "/envoy.api.v2.ClusterDiscoveryService/FetchClusters": grpclib.const.Handler( - self.__rpc_fetch_clusters, - grpclib.const.Cardinality.UNARY_UNARY, - DiscoveryRequest, - DiscoveryResponse, - ), - } - - -class RouteDiscoveryServiceBase(ServiceBase): - async def stream_routes( - self, request_iterator: AsyncIterator["DiscoveryRequest"] - ) -> AsyncIterator["DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def delta_routes( - self, request_iterator: AsyncIterator["DeltaDiscoveryRequest"] - ) -> AsyncIterator["DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_routes( - self, - version_info: str, - node: "core.Node", - resource_names: Optional[List[str]], - type_url: str, - response_nonce: str, - error_detail: "___google_rpc__.Status", - ) -> "DiscoveryResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_routes(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_routes, - stream, - 
request_kwargs, - ) - - async def __rpc_delta_routes(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_routes, - stream, - request_kwargs, - ) - - async def __rpc_fetch_routes(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "version_info": request.version_info, - "node": request.node, - "resource_names": request.resource_names, - "type_url": request.type_url, - "response_nonce": request.response_nonce, - "error_detail": request.error_detail, - } - - response = await self.fetch_routes(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.api.v2.RouteDiscoveryService/StreamRoutes": grpclib.const.Handler( - self.__rpc_stream_routes, - grpclib.const.Cardinality.STREAM_STREAM, - DiscoveryRequest, - DiscoveryResponse, - ), - "/envoy.api.v2.RouteDiscoveryService/DeltaRoutes": grpclib.const.Handler( - self.__rpc_delta_routes, - grpclib.const.Cardinality.STREAM_STREAM, - DeltaDiscoveryRequest, - DeltaDiscoveryResponse, - ), - "/envoy.api.v2.RouteDiscoveryService/FetchRoutes": grpclib.const.Handler( - self.__rpc_fetch_routes, - grpclib.const.Cardinality.UNARY_UNARY, - DiscoveryRequest, - DiscoveryResponse, - ), - } - - -class VirtualHostDiscoveryServiceBase(ServiceBase): - async def delta_virtual_hosts( - self, request_iterator: AsyncIterator["DeltaDiscoveryRequest"] - ) -> AsyncIterator["DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_delta_virtual_hosts(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_virtual_hosts, - stream, - request_kwargs, - ) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.api.v2.VirtualHostDiscoveryService/DeltaVirtualHosts": grpclib.const.Handler( - self.__rpc_delta_virtual_hosts, - grpclib.const.Cardinality.STREAM_STREAM, - DeltaDiscoveryRequest, - DeltaDiscoveryResponse, - ), - } - - -class EndpointDiscoveryServiceBase(ServiceBase): - async def stream_endpoints( - self, request_iterator: AsyncIterator["DiscoveryRequest"] - ) -> AsyncIterator["DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def delta_endpoints( - self, request_iterator: AsyncIterator["DeltaDiscoveryRequest"] - ) -> AsyncIterator["DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_endpoints( - self, - version_info: str, - node: "core.Node", - resource_names: Optional[List[str]], - type_url: str, - response_nonce: str, - error_detail: "___google_rpc__.Status", - ) -> "DiscoveryResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_endpoints(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_endpoints, - stream, - request_kwargs, - ) - - async def __rpc_delta_endpoints(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_endpoints, - stream, - request_kwargs, - ) - - async def __rpc_fetch_endpoints(self, stream: grpclib.server.Stream) -> None: - request = await 
stream.recv_message() - - request_kwargs = { - "version_info": request.version_info, - "node": request.node, - "resource_names": request.resource_names, - "type_url": request.type_url, - "response_nonce": request.response_nonce, - "error_detail": request.error_detail, - } - - response = await self.fetch_endpoints(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.api.v2.EndpointDiscoveryService/StreamEndpoints": grpclib.const.Handler( - self.__rpc_stream_endpoints, - grpclib.const.Cardinality.STREAM_STREAM, - DiscoveryRequest, - DiscoveryResponse, - ), - "/envoy.api.v2.EndpointDiscoveryService/DeltaEndpoints": grpclib.const.Handler( - self.__rpc_delta_endpoints, - grpclib.const.Cardinality.STREAM_STREAM, - DeltaDiscoveryRequest, - DeltaDiscoveryResponse, - ), - "/envoy.api.v2.EndpointDiscoveryService/FetchEndpoints": grpclib.const.Handler( - self.__rpc_fetch_endpoints, - grpclib.const.Cardinality.UNARY_UNARY, - DiscoveryRequest, - DiscoveryResponse, - ), - } - - -class ListenerDiscoveryServiceBase(ServiceBase): - async def delta_listeners( - self, request_iterator: AsyncIterator["DeltaDiscoveryRequest"] - ) -> AsyncIterator["DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def stream_listeners( - self, request_iterator: AsyncIterator["DiscoveryRequest"] - ) -> AsyncIterator["DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_listeners( - self, - version_info: str, - node: "core.Node", - resource_names: Optional[List[str]], - type_url: str, - response_nonce: str, - error_detail: "___google_rpc__.Status", - ) -> "DiscoveryResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_delta_listeners(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_listeners, - stream, - request_kwargs, - ) - - async def __rpc_stream_listeners(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_listeners, - stream, - request_kwargs, - ) - - async def __rpc_fetch_listeners(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "version_info": request.version_info, - "node": request.node, - "resource_names": request.resource_names, - "type_url": request.type_url, - "response_nonce": request.response_nonce, - "error_detail": request.error_detail, - } - - response = await self.fetch_listeners(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.api.v2.ListenerDiscoveryService/DeltaListeners": grpclib.const.Handler( - self.__rpc_delta_listeners, - grpclib.const.Cardinality.STREAM_STREAM, - DeltaDiscoveryRequest, - DeltaDiscoveryResponse, - ), - "/envoy.api.v2.ListenerDiscoveryService/StreamListeners": grpclib.const.Handler( - self.__rpc_stream_listeners, - grpclib.const.Cardinality.STREAM_STREAM, - DiscoveryRequest, - DiscoveryResponse, - ), - "/envoy.api.v2.ListenerDiscoveryService/FetchListeners": grpclib.const.Handler( - self.__rpc_fetch_listeners, - grpclib.const.Cardinality.UNARY_UNARY, - DiscoveryRequest, - DiscoveryResponse, - ), - } - - -from . import auth -from . import cluster -from . import core -from . 
import endpoint -from . import listener -from . import route -from ... import type as __type__ -from ....google import rpc as ___google_rpc__ -from ...config.filter.accesslog import v2 as __config_filter_accesslog_v2__ -from ...config.listener import v2 as __config_listener_v2__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/api/v2/auth/__init__.py b/src/envoy_data_plane/envoy/api/v2/auth/__init__.py deleted file mode 100644 index 56ae193..0000000 --- a/src/envoy_data_plane/envoy/api/v2/auth/__init__.py +++ /dev/null @@ -1,410 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/api/v2/auth/cert.proto, envoy/api/v2/auth/common.proto, envoy/api/v2/auth/secret.proto, envoy/api/v2/auth/tls.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class TlsParametersTlsProtocol(betterproto.Enum): - TLS_AUTO = 0 - TLSv1_0 = 1 - TLSv1_1 = 2 - TLSv1_2 = 3 - TLSv1_3 = 4 - - -class CertificateValidationContextTrustChainVerification(betterproto.Enum): - VERIFY_TRUST_CHAIN = 0 - ACCEPT_UNTRUSTED = 1 - - -@dataclass(eq=False, repr=False) -class TlsParameters(betterproto.Message): - # Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and - # ``TLSv1_0`` for servers. - tls_minimum_protocol_version: "TlsParametersTlsProtocol" = betterproto.enum_field(1) - # Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and - # ``TLSv1_3`` for servers. - tls_maximum_protocol_version: "TlsParametersTlsProtocol" = betterproto.enum_field(2) - # If specified, the TLS listener will only support the specified `cipher list - # `_ when negotiating TLS 1.0-1.2 - # (this setting has no effect when negotiating TLS 1.3). If not specified, - # the default list will be used. In non-FIPS builds, the default cipher list - # is: .. code-block:: none [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA- - # CHACHA20-POLY1305] [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA- - # CHACHA20-POLY1305] ECDHE-ECDSA-AES128-SHA ECDHE-RSA-AES128-SHA - # AES128-GCM-SHA256 AES128-SHA ECDHE-ECDSA-AES256-GCM-SHA384 ECDHE-RSA- - # AES256-GCM-SHA384 ECDHE-ECDSA-AES256-SHA ECDHE-RSA-AES256-SHA - # AES256-GCM-SHA384 AES256-SHA In builds using :ref:`BoringSSL FIPS - # `, the default cipher list is: .. code-block:: none - # ECDHE-ECDSA-AES128-GCM-SHA256 ECDHE-RSA-AES128-GCM-SHA256 ECDHE-ECDSA- - # AES128-SHA ECDHE-RSA-AES128-SHA AES128-GCM-SHA256 AES128-SHA ECDHE- - # ECDSA-AES256-GCM-SHA384 ECDHE-RSA-AES256-GCM-SHA384 ECDHE-ECDSA- - # AES256-SHA ECDHE-RSA-AES256-SHA AES256-GCM-SHA384 AES256-SHA - cipher_suites: List[str] = betterproto.string_field(3) - # If specified, the TLS connection will only support the specified ECDH - # curves. If not specified, the default curves will be used. In non-FIPS - # builds, the default curves are: .. code-block:: none X25519 P-256 In - # builds using :ref:`BoringSSL FIPS `, the default - # curve is: .. code-block:: none P-256 - ecdh_curves: List[str] = betterproto.string_field(4) - - -@dataclass(eq=False, repr=False) -class PrivateKeyProvider(betterproto.Message): - """ - BoringSSL private key method configuration. The private key methods are - used for external (potentially asynchronous) signing and decryption - operations. Some use cases for private key methods would be TPM support and - TLS acceleration. 
- """ - - # Private key method provider name. The name must match a supported private - # key method provider type. - provider_name: str = betterproto.string_field(1) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn("PrivateKeyProvider.config is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class TlsCertificate(betterproto.Message): - """[#next-free-field: 7]""" - - # The TLS certificate chain. - certificate_chain: "_core__.DataSource" = betterproto.message_field(1) - # The TLS private key. - private_key: "_core__.DataSource" = betterproto.message_field(2) - # BoringSSL private key method provider. This is an alternative to - # :ref:`private_key ` field. - # This can't be marked as ``oneof`` due to API compatibility reasons. Setting - # both :ref:`private_key ` - # and :ref:`private_key_provider - # ` fields will - # result in an error. - private_key_provider: "PrivateKeyProvider" = betterproto.message_field(6) - # The password to decrypt the TLS private key. If this field is not set, it - # is assumed that the TLS private key is not password encrypted. - password: "_core__.DataSource" = betterproto.message_field(3) - # [#not-implemented-hide:] - ocsp_staple: "_core__.DataSource" = betterproto.message_field(4) - # [#not-implemented-hide:] - signed_certificate_timestamp: List[ - "_core__.DataSource" - ] = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class TlsSessionTicketKeys(betterproto.Message): - # Keys for encrypting and decrypting TLS session tickets. The first key in - # the array contains the key to encrypt all new sessions created by this - # context. All keys are candidates for decrypting received tickets. This - # allows for easy rotation of keys by, for example, putting the new key - # first, and the previous key second. If :ref:`session_ticket_keys - # ` is not - # specified, the TLS library will still support resuming sessions via - # tickets, but it will use an internally-generated and managed key, so - # sessions cannot be resumed across hot restarts or on different hosts. Each - # key must contain exactly 80 bytes of cryptographically-secure random data. - # For example, the output of ``openssl rand 80``. .. attention:: Using this - # feature has serious security considerations and risks. Improper handling of - # keys may result in loss of secrecy in connections, even if ciphers - # supporting perfect forward secrecy are used. See - # https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - # discussion. To minimize the risk, you must: * Keep the session ticket - # keys at least as secure as your TLS certificate private keys * Rotate - # session ticket keys at least daily, and preferably hourly * Always - # generate keys using a cryptographically-secure random data source - keys: List["_core__.DataSource"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class CertificateValidationContext(betterproto.Message): - """[#next-free-field: 11]""" - - # TLS certificate data containing certificate authority certificates to use - # in verifying a presented peer certificate (e.g. server certificate for - # clusters or client certificate for listeners). If not specified and a peer - # certificate is presented it will not be verified. 
By default, a client - # certificate is optional, unless one of the additional options - # (:ref:`require_client_certificate - # `, - # :ref:`verify_certificate_spki `, :ref:`verify_certificate_hash `, or - # :ref:`match_subject_alt_names `) is also specified. It can optionally - # contain certificate revocation lists, in which case Envoy will verify that - # the presented peer certificate has not been revoked by one of the included - # CRLs. See :ref:`the TLS overview ` - # for a list of common system CA locations. - trusted_ca: "_core__.DataSource" = betterproto.message_field(1) - # An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will - # verify that the SHA-256 of the DER-encoded Subject Public Key Information - # (SPKI) of the presented certificate matches one of the specified values. A - # base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the - # certificate can be generated with the following command: .. code-block:: - # bash $ openssl x509 -in path/to/client.crt -noout -pubkey | openssl - # pkey -pubin -outform DER | openssl dgst -sha256 -binary | openssl - # enc -base64 NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= This is the - # format used in HTTP Public Key Pinning. When both: - # :ref:`verify_certificate_hash ` and :ref:`verify_certificate_spki ` are - # specified, a hash matching value from either of the lists will result in - # the certificate being accepted. .. attention:: This option is preferred - # over :ref:`verify_certificate_hash `, because SPKI is tied to a private - # key, so it doesn't change when the certificate is renewed using the same - # private key. - verify_certificate_spki: List[str] = betterproto.string_field(3) - # An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will - # verify that the SHA-256 of the DER-encoded presented certificate matches - # one of the specified values. A hex-encoded SHA-256 of the certificate can - # be generated with the following command: .. code-block:: bash $ openssl - # x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " - # -f2 df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a A - # long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the - # certificate can be generated with the following command: .. code-block:: - # bash $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | - # cut -d"=" -f2 DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83 - # :FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A Both of those formats are acceptable. - # When both: :ref:`verify_certificate_hash ` and - # :ref:`verify_certificate_spki ` are specified, a hash matching value from - # either of the lists will result in the certificate being accepted. - verify_certificate_hash: List[str] = betterproto.string_field(2) - # An optional list of Subject Alternative Names. If specified, Envoy will - # verify that the Subject Alternative Name of the presented certificate - # matches one of the specified values. .. attention:: Subject Alternative - # Names are easily spoofable and verifying only them is insecure, therefore - # this option must be used together with :ref:`trusted_ca - # `. - verify_subject_alt_name: List[str] = betterproto.string_field(4) - # An optional list of Subject Alternative name matchers. Envoy will verify - # that the Subject Alternative Name of the presented certificate matches one - # of the specified matches. 
When a certificate has wildcard DNS SAN entries, - # to match a specific client, it should be configured with exact match type - # in the :ref:`string matcher `. - # For example if the certificate has "\*.example.com" as DNS SAN entry, to - # allow only "api.example.com", it should be configured as shown below. .. - # code-block:: yaml match_subject_alt_names: exact: "api.example.com" .. - # attention:: Subject Alternative Names are easily spoofable and verifying - # only them is insecure, therefore this option must be used together with - # :ref:`trusted_ca - # `. - match_subject_alt_names: List[ - "___type_matcher__.StringMatcher" - ] = betterproto.message_field(9) - # [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - require_ocsp_staple: Optional[bool] = betterproto.message_field( - 5, wraps=betterproto.TYPE_BOOL - ) - # [#not-implemented-hide:] Must present signed certificate time-stamp. - require_signed_certificate_timestamp: Optional[bool] = betterproto.message_field( - 6, wraps=betterproto.TYPE_BOOL - ) - # An optional `certificate revocation list - # `_ (in PEM - # format). If specified, Envoy will verify that the presented peer - # certificate has not been revoked by this CRL. If this DataSource contains - # multiple CRLs, all of them will be used. - crl: "_core__.DataSource" = betterproto.message_field(7) - # If specified, Envoy will not reject expired certificates. - allow_expired_certificate: bool = betterproto.bool_field(8) - # Certificate trust chain verification mode. - trust_chain_verification: "CertificateValidationContextTrustChainVerification" = ( - betterproto.enum_field(10) - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.verify_subject_alt_name: - warnings.warn( - "CertificateValidationContext.verify_subject_alt_name is deprecated", - DeprecationWarning, - ) - - -@dataclass(eq=False, repr=False) -class GenericSecret(betterproto.Message): - # Secret of generic type and is available to filters. - secret: "_core__.DataSource" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class SdsSecretConfig(betterproto.Message): - # Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely - # referred to. When both name and config are specified, then secret can be - # fetched and/or reloaded via SDS. When only name is specified, then secret - # will be loaded from static resources. - name: str = betterproto.string_field(1) - sds_config: "_core__.ConfigSource" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Secret(betterproto.Message): - """[#next-free-field: 6]""" - - # Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely - # referred to. - name: str = betterproto.string_field(1) - tls_certificate: "TlsCertificate" = betterproto.message_field(2, group="type") - session_ticket_keys: "TlsSessionTicketKeys" = betterproto.message_field( - 3, group="type" - ) - validation_context: "CertificateValidationContext" = betterproto.message_field( - 4, group="type" - ) - generic_secret: "GenericSecret" = betterproto.message_field(5, group="type") - - -@dataclass(eq=False, repr=False) -class UpstreamTlsContext(betterproto.Message): - # Common TLS context settings. .. attention:: Server certificate - # verification is not enabled by default. Configure :ref:`trusted_ca` to enable - # verification. - common_tls_context: "CommonTlsContext" = betterproto.message_field(1) - # SNI string to use when creating TLS backend connections. 
- sni: str = betterproto.string_field(2) - # If true, server-initiated TLS renegotiation will be allowed. .. attention:: - # TLS renegotiation is considered insecure and shouldn't be used unless - # absolutely necessary. - allow_renegotiation: bool = betterproto.bool_field(3) - # Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs - # and Session Tickets for TLSv1.2 and older) to store for the purpose of - # session resumption. Defaults to 1, setting this to 0 disables session - # resumption. - max_session_keys: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class DownstreamTlsContext(betterproto.Message): - """[#next-free-field: 8]""" - - # Common TLS context settings. - common_tls_context: "CommonTlsContext" = betterproto.message_field(1) - # If specified, Envoy will reject connections without a valid client - # certificate. - require_client_certificate: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) - # If specified, Envoy will reject connections without a valid and matching - # SNI. [#not-implemented-hide:] - require_sni: Optional[bool] = betterproto.message_field( - 3, wraps=betterproto.TYPE_BOOL - ) - # TLS session ticket key settings. - session_ticket_keys: "TlsSessionTicketKeys" = betterproto.message_field( - 4, group="session_ticket_keys_type" - ) - # Config for fetching TLS session ticket keys via SDS API. - session_ticket_keys_sds_secret_config: "SdsSecretConfig" = ( - betterproto.message_field(5, group="session_ticket_keys_type") - ) - # Config for controlling stateless TLS session resumption: setting this to - # true will cause the TLS server to not issue TLS session tickets for the - # purposes of stateless TLS session resumption. If set to false, the TLS - # server will issue TLS session tickets and encrypt/decrypt them using the - # keys specified through either :ref:`session_ticket_keys - # ` or - # :ref:`session_ticket_keys_sds_secret_config `. If this config is set - # to false and no keys are explicitly configured, the TLS server will issue - # TLS session tickets and encrypt/decrypt them using an internally-generated - # and managed key, with the implication that sessions cannot be resumed - # across hot restarts or on different hosts. - disable_stateless_session_resumption: bool = betterproto.bool_field( - 7, group="session_ticket_keys_type" - ) - # If specified, ``session_timeout`` will change the maximum lifetime (in - # seconds) of the TLS session. Currently this value is used as a hint for the - # `TLS session ticket lifetime (for TLSv1.2) - # `_. Only seconds can be - # specified (fractional seconds are ignored). - session_timeout: timedelta = betterproto.message_field(6) - - -@dataclass(eq=False, repr=False) -class CommonTlsContext(betterproto.Message): - """ - TLS context shared by both client and server TLS contexts. [#next-free- - field: 9] - """ - - # TLS protocol versions, cipher suites etc. - tls_params: "TlsParameters" = betterproto.message_field(1) - # :ref:`Multiple TLS certificates ` can be - # associated with the same context to allow both RSA and ECDSA certificates. - # Only a single TLS certificate is supported in client contexts. In server - # contexts, the first RSA certificate is used for clients that only support - # RSA and the first ECDSA certificate is used for clients that support ECDSA. - tls_certificates: List["TlsCertificate"] = betterproto.message_field(2) - # Configs for fetching TLS certificates via SDS API. 
- tls_certificate_sds_secret_configs: List[ - "SdsSecretConfig" - ] = betterproto.message_field(6) - # How to validate peer certificates. - validation_context: "CertificateValidationContext" = betterproto.message_field( - 3, group="validation_context_type" - ) - # Config for fetching validation context via SDS API. - validation_context_sds_secret_config: "SdsSecretConfig" = betterproto.message_field( - 7, group="validation_context_type" - ) - # Combined certificate validation context holds a default - # CertificateValidationContext and SDS config. When SDS server returns - # dynamic CertificateValidationContext, both dynamic and default - # CertificateValidationContext are merged into a new - # CertificateValidationContext for validation. This merge is done by - # Message::MergeFrom(), so dynamic CertificateValidationContext overwrites - # singular fields in default CertificateValidationContext, and concatenates - # repeated fields to default CertificateValidationContext, and logical OR is - # applied to boolean fields. - combined_validation_context: "CommonTlsContextCombinedCertificateValidationContext" = betterproto.message_field( - 8, group="validation_context_type" - ) - # Supplies the list of ALPN protocols that the listener should expose. In - # practice this is likely to be set to one of two values (see the - # :ref:`codec_type ` parameter in the HTTP connection - # manager for more information): * "h2,http/1.1" If the listener is going to - # support both HTTP/2 and HTTP/1.1. * "http/1.1" If the listener is only - # going to support HTTP/1.1. There is no default for this parameter. If - # empty, Envoy will not expose ALPN. - alpn_protocols: List[str] = betterproto.string_field(4) - - -@dataclass(eq=False, repr=False) -class CommonTlsContextCombinedCertificateValidationContext(betterproto.Message): - # How to validate peer certificates. - default_validation_context: "CertificateValidationContext" = ( - betterproto.message_field(1) - ) - # Config for fetching validation context via SDS API. - validation_context_sds_secret_config: "SdsSecretConfig" = betterproto.message_field( - 2 - ) - - -from .. import core as _core__ -from ....type import matcher as ___type_matcher__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/api/v2/cluster/__init__.py b/src/envoy_data_plane/envoy/api/v2/cluster/__init__.py deleted file mode 100644 index fafce12..0000000 --- a/src/envoy_data_plane/envoy/api/v2/cluster/__init__.py +++ /dev/null @@ -1,253 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/api/v2/cluster/circuit_breaker.proto, envoy/api/v2/cluster/filter.proto, envoy/api/v2/cluster/outlier_detection.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class CircuitBreakers(betterproto.Message): - """ - :ref:`Circuit breaking` settings can be - specified individually for each defined priority. - """ - - # If multiple - # :ref:`Thresholds` are - # defined with the same - # :ref:`RoutingPriority`, the first one - # in the list is used. If no Thresholds is defined for a given - # :ref:`RoutingPriority`, the default - # values are used. 
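Taken together, the TLS context messages above compose as in this sketch of a server-side context that requires client certificates, serves its certificate via SDS, and advertises both HTTP/2 and HTTP/1.1 over ALPN. Module paths are assumed from the regenerated package; the secret name and CA path are placeholders:

.. code-block:: python

    from envoy_data_plane.envoy.api.v2.auth import (
        CertificateValidationContext,
        CommonTlsContext,
        DownstreamTlsContext,
        SdsSecretConfig,
    )
    from envoy_data_plane.envoy.api.v2.core import DataSource

    downstream_tls = DownstreamTlsContext(
        require_client_certificate=True,
        common_tls_context=CommonTlsContext(
            # Fetch the certificate named "server_cert" via SDS.
            tls_certificate_sds_secret_configs=[SdsSecretConfig(name="server_cert")],
            alpn_protocols=["h2", "http/1.1"],
            validation_context=CertificateValidationContext(
                trusted_ca=DataSource(filename="/etc/ssl/certs/ca-certificates.crt"),
            ),
        ),
    )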
- thresholds: List["CircuitBreakersThresholds"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class CircuitBreakersThresholds(betterproto.Message): - """ - A Thresholds defines CircuitBreaker settings for a - :ref:`RoutingPriority`. [#next-free- - field: 9] - """ - - # The :ref:`RoutingPriority` the - # specified CircuitBreaker settings apply to. - priority: "_core__.RoutingPriority" = betterproto.enum_field(1) - # The maximum number of connections that Envoy will make to the upstream - # cluster. If not specified, the default is 1024. - max_connections: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # The maximum number of pending requests that Envoy will allow to the - # upstream cluster. If not specified, the default is 1024. - max_pending_requests: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - # The maximum number of parallel requests that Envoy will make to the - # upstream cluster. If not specified, the default is 1024. - max_requests: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # The maximum number of parallel retries that Envoy will allow to the - # upstream cluster. If not specified, the default is 3. - max_retries: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) - # Specifies a limit on concurrent retries in relation to the number of active - # requests. This parameter is optional. .. note:: If this field is set, - # the retry budget will override any configured retry circuit breaker. - retry_budget: "CircuitBreakersThresholdsRetryBudget" = betterproto.message_field(8) - # If track_remaining is true, then stats will be published that expose the - # number of resources remaining until the circuit breakers open. If not - # specified, the default is false. .. note:: If a retry budget is used in - # lieu of the max_retries circuit breaker, the remaining retry resources - # remaining will not be tracked. - track_remaining: bool = betterproto.bool_field(6) - # The maximum number of connection pools per cluster that Envoy will - # concurrently support at once. If not specified, the default is unlimited. - # Set this for clusters which create a large number of connection pools. See - # :ref:`Circuit Breaking - # ` for more - # details. - max_connection_pools: Optional[int] = betterproto.message_field( - 7, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class CircuitBreakersThresholdsRetryBudget(betterproto.Message): - # Specifies the limit on concurrent retries as a percentage of the sum of - # active requests and active pending requests. For example, if there are 100 - # active requests and the budget_percent is set to 25, there may be 25 active - # retries. This parameter is optional. Defaults to 20%. - budget_percent: "___type__.Percent" = betterproto.message_field(1) - # Specifies the minimum retry concurrency allowed for the retry budget. The - # limit on the number of active retries may never go below this number. This - # parameter is optional. Defaults to 3. - min_retry_concurrency: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class Filter(betterproto.Message): - # The name of the filter to instantiate. The name must match a - # :ref:`supported filter `. - name: str = betterproto.string_field(1) - # Filter specific configuration which depends on the filter being - # instantiated. 
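The per-priority thresholds and the optional retry budget described above combine as in the sketch below. The module path is assumed from the regenerated package and the numbers are purely illustrative, not recommendations:

.. code-block:: python

    from envoy_data_plane.envoy.api.v2.cluster import (
        CircuitBreakers,
        CircuitBreakersThresholds,
        CircuitBreakersThresholdsRetryBudget,
    )
    from envoy_data_plane.envoy.api.v2.core import RoutingPriority
    from envoy_data_plane.envoy.type import Percent

    circuit_breakers = CircuitBreakers(
        thresholds=[
            CircuitBreakersThresholds(
                priority=RoutingPriority.HIGH,
                max_connections=2048,
                max_pending_requests=2048,
                # A retry budget overrides the max_retries circuit breaker.
                retry_budget=CircuitBreakersThresholdsRetryBudget(
                    budget_percent=Percent(value=25), min_retry_concurrency=5
                ),
                track_remaining=True,
            )
        ]
    )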
See the supported filters for further documentation. - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class OutlierDetection(betterproto.Message): - """ - See the :ref:`architecture overview ` for - more information on outlier detection. [#next-free-field: 21] - """ - - # The number of consecutive 5xx responses or local origin errors that are - # mapped to 5xx error codes before a consecutive 5xx ejection occurs. - # Defaults to 5. - consecutive_5_xx: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # The time interval between ejection analysis sweeps. This can result in both - # new ejections as well as hosts being returned to service. Defaults to - # 10000ms or 10s. - interval: timedelta = betterproto.message_field(2) - # The base time that a host is ejected for. The real time is equal to the - # base time multiplied by the number of times the host has been ejected. - # Defaults to 30000ms or 30s. - base_ejection_time: timedelta = betterproto.message_field(3) - # The maximum % of an upstream cluster that can be ejected due to outlier - # detection. Defaults to 10% but will eject at least one host regardless of - # the value. - max_ejection_percent: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # The % chance that a host will be actually ejected when an outlier status is - # detected through consecutive 5xx. This setting can be used to disable - # ejection or to ramp it up slowly. Defaults to 100. - enforcing_consecutive_5_xx: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) - # The % chance that a host will be actually ejected when an outlier status is - # detected through success rate statistics. This setting can be used to - # disable ejection or to ramp it up slowly. Defaults to 100. - enforcing_success_rate: Optional[int] = betterproto.message_field( - 6, wraps=betterproto.TYPE_UINT32 - ) - # The number of hosts in a cluster that must have enough request volume to - # detect success rate outliers. If the number of hosts is less than this - # setting, outlier detection via success rate statistics is not performed for - # any host in the cluster. Defaults to 5. - success_rate_minimum_hosts: Optional[int] = betterproto.message_field( - 7, wraps=betterproto.TYPE_UINT32 - ) - # The minimum number of total requests that must be collected in one interval - # (as defined by the interval duration above) to include this host in success - # rate based outlier detection. If the volume is lower than this setting, - # outlier detection via success rate statistics is not performed for that - # host. Defaults to 100. - success_rate_request_volume: Optional[int] = betterproto.message_field( - 8, wraps=betterproto.TYPE_UINT32 - ) - # This factor is used to determine the ejection threshold for success rate - # outlier ejection. The ejection threshold is the difference between the mean - # success rate, and the product of this factor and the standard deviation of - # the mean success rate: mean - (stdev * success_rate_stdev_factor). This - # factor is divided by a thousand to get a double. That is, if the desired - # factor is 1.9, the runtime value should be 1900. Defaults to 1900. - success_rate_stdev_factor: Optional[int] = betterproto.message_field( - 9, wraps=betterproto.TYPE_UINT32 - ) - # The number of consecutive gateway failures (502, 503, 504 status codes) - # before a consecutive gateway failure ejection occurs. 
Defaults to 5. - consecutive_gateway_failure: Optional[int] = betterproto.message_field( - 10, wraps=betterproto.TYPE_UINT32 - ) - # The % chance that a host will be actually ejected when an outlier status is - # detected through consecutive gateway failures. This setting can be used to - # disable ejection or to ramp it up slowly. Defaults to 0. - enforcing_consecutive_gateway_failure: Optional[int] = betterproto.message_field( - 11, wraps=betterproto.TYPE_UINT32 - ) - # Determines whether to distinguish local origin failures from external - # errors. If set to true the following configuration parameters are taken - # into account: :ref:`consecutive_local_origin_failure`, :ref:`enforcing_cons - # ecutive_local_origin_failure` and :ref:`enforcing_local_origin_su - # ccess_rate`. Defaults to false. - split_external_local_origin_errors: bool = betterproto.bool_field(12) - # The number of consecutive locally originated failures before ejection - # occurs. Defaults to 5. Parameter takes effect only when :ref:`split_externa - # l_local_origin_errors` is set to true. - consecutive_local_origin_failure: Optional[int] = betterproto.message_field( - 13, wraps=betterproto.TYPE_UINT32 - ) - # The % chance that a host will be actually ejected when an outlier status is - # detected through consecutive locally originated failures. This setting can - # be used to disable ejection or to ramp it up slowly. Defaults to 100. - # Parameter takes effect only when :ref:`split_external_local_origin_errors - # ` is set to true. - enforcing_consecutive_local_origin_failure: Optional[ - int - ] = betterproto.message_field(14, wraps=betterproto.TYPE_UINT32) - # The % chance that a host will be actually ejected when an outlier status is - # detected through success rate statistics for locally originated errors. - # This setting can be used to disable ejection or to ramp it up slowly. - # Defaults to 100. Parameter takes effect only when :ref:`split_external_loca - # l_origin_errors` is set to true. - enforcing_local_origin_success_rate: Optional[int] = betterproto.message_field( - 15, wraps=betterproto.TYPE_UINT32 - ) - # The failure percentage to use when determining failure percentage-based - # outlier detection. If the failure percentage of a given host is greater - # than or equal to this value, it will be ejected. Defaults to 85. - failure_percentage_threshold: Optional[int] = betterproto.message_field( - 16, wraps=betterproto.TYPE_UINT32 - ) - # The % chance that a host will be actually ejected when an outlier status is - # detected through failure percentage statistics. This setting can be used to - # disable ejection or to ramp it up slowly. Defaults to 0. [#next-major- - # version: setting this without setting failure_percentage_threshold should - # be invalid in v4.] - enforcing_failure_percentage: Optional[int] = betterproto.message_field( - 17, wraps=betterproto.TYPE_UINT32 - ) - # The % chance that a host will be actually ejected when an outlier status is - # detected through local-origin failure percentage statistics. This setting - # can be used to disable ejection or to ramp it up slowly. Defaults to 0. - enforcing_failure_percentage_local_origin: Optional[ - int - ] = betterproto.message_field(18, wraps=betterproto.TYPE_UINT32) - # The minimum number of hosts in a cluster in order to perform failure - # percentage-based ejection. If the total number of hosts in the cluster is - # less than this value, failure percentage-based ejection will not be - # performed. Defaults to 5. 
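A few of the outlier detection knobs above pulled together in one sketch, under the same module-path assumption; values are illustrative:

.. code-block:: python

    from datetime import timedelta

    from envoy_data_plane.envoy.api.v2.cluster import OutlierDetection

    outlier_detection = OutlierDetection(
        consecutive_5_xx=5,
        interval=timedelta(seconds=10),
        base_ejection_time=timedelta(seconds=30),
        max_ejection_percent=10,
        # Track locally originated failures separately from upstream 5xx errors.
        split_external_local_origin_errors=True,
        consecutive_local_origin_failure=5,
    )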
- failure_percentage_minimum_hosts: Optional[int] = betterproto.message_field( - 19, wraps=betterproto.TYPE_UINT32 - ) - # The minimum number of total requests that must be collected in one interval - # (as defined by the interval duration above) to perform failure percentage- - # based ejection for this host. If the volume is lower than this setting, - # failure percentage-based ejection will not be performed for this host. - # Defaults to 50. - failure_percentage_request_volume: Optional[int] = betterproto.message_field( - 20, wraps=betterproto.TYPE_UINT32 - ) - - -from .. import core as _core__ -from .... import type as ___type__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/api/v2/core/__init__.py b/src/envoy_data_plane/envoy/api/v2/core/__init__.py deleted file mode 100644 index 176bbf2..0000000 --- a/src/envoy_data_plane/envoy/api/v2/core/__init__.py +++ /dev/null @@ -1,1545 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/api/v2/core/address.proto, envoy/api/v2/core/backoff.proto, envoy/api/v2/core/base.proto, envoy/api/v2/core/config_source.proto, envoy/api/v2/core/event_service_config.proto, envoy/api/v2/core/grpc_method_list.proto, envoy/api/v2/core/grpc_service.proto, envoy/api/v2/core/health_check.proto, envoy/api/v2/core/http_uri.proto, envoy/api/v2/core/protocol.proto, envoy/api/v2/core/socket_option.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import Dict, List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class SocketOptionSocketState(betterproto.Enum): - STATE_PREBIND = 0 - STATE_BOUND = 1 - STATE_LISTENING = 2 - - -class SocketAddressProtocol(betterproto.Enum): - TCP = 0 - UDP = 1 - - -class RoutingPriority(betterproto.Enum): - """ - Envoy supports :ref:`upstream priority routing - ` both at the route and the virtual - cluster level. The current priority implementation uses different - connection pool and circuit breaking settings for each priority level. This - means that even for HTTP/2 requests, two physical connections will be used - to an upstream host. In the future Envoy will likely support true HTTP/2 - priority over a single upstream connection. - """ - - DEFAULT = 0 - HIGH = 1 - - -class RequestMethod(betterproto.Enum): - """HTTP request method.""" - - METHOD_UNSPECIFIED = 0 - GET = 1 - HEAD = 2 - POST = 3 - PUT = 4 - DELETE = 5 - CONNECT = 6 - OPTIONS = 7 - TRACE = 8 - PATCH = 9 - - -class TrafficDirection(betterproto.Enum): - """Identifies the direction of the traffic relative to the local Envoy.""" - - # Default option is unspecified. - UNSPECIFIED = 0 - # The transport is used for incoming traffic. - INBOUND = 1 - # The transport is used for outgoing traffic. - OUTBOUND = 2 - - -class ApiVersion(betterproto.Enum): - """ - xDS API version. This is used to describe both resource and transport - protocol versions (in distinct configuration fields). - """ - - # When not specified, we assume v2, to ease migration to Envoy's stable API - # versioning. If a client does not support v2 (e.g. due to deprecation), this - # is an invalid value. - AUTO = 0 - # Use xDS v2 API. - V2 = 1 - # Use xDS v3 API. 
- V3 = 2 - - -class ApiConfigSourceApiType(betterproto.Enum): - UNSUPPORTED_REST_LEGACY = 0 - REST = 1 - GRPC = 2 - DELTA_GRPC = 3 - - -class HealthStatus(betterproto.Enum): - """Endpoint health status.""" - - # The health status is not known. This is interpreted by Envoy as *HEALTHY*. - UNKNOWN = 0 - # Healthy. - HEALTHY = 1 - # Unhealthy. - UNHEALTHY = 2 - # Connection draining in progress. E.g., - # ``_ or - # ``_. This is interpreted by Envoy as *UNHEALTHY*. - DRAINING = 3 - # Health check timed out. This is part of HDS and is interpreted by Envoy as - # *UNHEALTHY*. - TIMEOUT = 4 - # Degraded. - DEGRADED = 5 - - -class HttpProtocolOptionsHeadersWithUnderscoresAction(betterproto.Enum): - ALLOW = 0 - REJECT_REQUEST = 1 - DROP_HEADER = 2 - - -@dataclass(eq=False, repr=False) -class SocketOption(betterproto.Message): - """ - Generic socket option message. This would be used to set socket options - that might not exist in upstream kernels or precompiled Envoy binaries. - [#next-free-field: 7] - """ - - # An optional name to give this socket option for debugging, etc. Uniqueness - # is not required and no special meaning is assumed. - description: str = betterproto.string_field(1) - # Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP - level: int = betterproto.int64_field(2) - # The numeric name as passed to setsockopt - name: int = betterproto.int64_field(3) - # Because many sockopts take an int value. - int_value: int = betterproto.int64_field(4, group="value") - # Otherwise it's a byte buffer. - buf_value: bytes = betterproto.bytes_field(5, group="value") - # The state in which the option will be applied. When used in BindConfig - # STATE_PREBIND is currently the only valid value. - state: "SocketOptionSocketState" = betterproto.enum_field(6) - - -@dataclass(eq=False, repr=False) -class Pipe(betterproto.Message): - # Unix Domain Socket path. On Linux, paths starting with '@' will use the - # abstract namespace. The starting '@' is replaced by a null byte by Envoy. - # Paths starting with '@' will result in an error in environments other than - # Linux. - path: str = betterproto.string_field(1) - # The mode for the Pipe. Not applicable for abstract sockets. - mode: int = betterproto.uint32_field(2) - - -@dataclass(eq=False, repr=False) -class SocketAddress(betterproto.Message): - """[#next-free-field: 7]""" - - protocol: "SocketAddressProtocol" = betterproto.enum_field(1) - # The address for this socket. :ref:`Listeners ` will bind - # to the address. An empty address is not allowed. Specify ``0.0.0.0`` or - # ``::`` to bind to any address. [#comment:TODO(zuercher) reinstate when - # implemented: It is possible to distinguish a Listener address via the - # prefix/suffix matching in :ref:`FilterChainMatch - # `.] When used within an upstream - # :ref:`BindConfig `, the address controls the - # source address of outbound connections. For :ref:`clusters - # `, the cluster type determines whether the address - # must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS - # (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be - # customized via :ref:`resolver_name - # `. - address: str = betterproto.string_field(2) - port_value: int = betterproto.uint32_field(3, group="port_specifier") - # This is only valid if :ref:`resolver_name - # ` is specified below and - # the named resolver is capable of named port resolution. - named_port: str = betterproto.string_field(4, group="port_specifier") - # The name of the custom resolver. 
This must have been registered with Envoy. - # If this is empty, a context dependent default applies. If the address is a - # concrete IP address, no resolution will occur. If address is a hostname - # this should be set for resolution other than DNS. Specifying a custom - # resolver with *STRICT_DNS* or *LOGICAL_DNS* will generate an error at - # runtime. - resolver_name: str = betterproto.string_field(5) - # When binding to an IPv6 address above, this enables `IPv4 compatibility - # `_. Binding to ``::`` will - # allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into - # IPv6 space as ``::FFFF:``. - ipv4_compat: bool = betterproto.bool_field(6) - - -@dataclass(eq=False, repr=False) -class TcpKeepalive(betterproto.Message): - # Maximum number of keepalive probes to send without response before deciding - # the connection is dead. Default is to use the OS level configuration - # (unless overridden, Linux defaults to 9.) - keepalive_probes: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # The number of seconds a connection needs to be idle before keep-alive - # probes start being sent. Default is to use the OS level configuration - # (unless overridden, Linux defaults to 7200s (i.e., 2 hours.) - keepalive_time: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # The number of seconds between keep-alive probes. Default is to use the OS - # level configuration (unless overridden, Linux defaults to 75s.) - keepalive_interval: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class BindConfig(betterproto.Message): - # The address to bind to when creating a socket. - source_address: "SocketAddress" = betterproto.message_field(1) - # Whether to set the *IP_FREEBIND* option when creating the socket. When this - # flag is set to true, allows the :ref:`source_address - # ` to be an IP address - # that is not configured on the system running Envoy. When this flag is set - # to false, the option *IP_FREEBIND* is disabled on the socket. When this - # flag is not set (default), the socket is not modified, i.e. the option is - # neither enabled nor disabled. - freebind: Optional[bool] = betterproto.message_field(2, wraps=betterproto.TYPE_BOOL) - # Additional socket options that may not be present in Envoy source code or - # precompiled binaries. - socket_options: List["SocketOption"] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class Address(betterproto.Message): - """ - Addresses specify either a logical or physical address and port, which are - used to tell Envoy where to bind/listen, connect to upstream and find - management servers. - """ - - socket_address: "SocketAddress" = betterproto.message_field(1, group="address") - pipe: "Pipe" = betterproto.message_field(2, group="address") - - -@dataclass(eq=False, repr=False) -class CidrRange(betterproto.Message): - """ - CidrRange specifies an IP Address and a prefix length to construct the - subnet mask for a `CIDR `_ range. - """ - - # IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - address_prefix: str = betterproto.string_field(1) - # Length of prefix, e.g. 0, 32. Defaults to 0 when unset. 
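A sketch of how the address messages above are typically composed: a listener-style Address bound to all IPv4 addresses, and a BindConfig pinning the source address of outbound connections. Module path assumed; the addresses and port are placeholders:

.. code-block:: python

    from envoy_data_plane.envoy.api.v2.core import (
        Address,
        BindConfig,
        SocketAddress,
        SocketAddressProtocol,
    )

    # Bind a listener to all IPv4 addresses on port 8080.
    listen_address = Address(
        socket_address=SocketAddress(
            protocol=SocketAddressProtocol.TCP, address="0.0.0.0", port_value=8080
        )
    )

    # Control the source address used for upstream connections.
    bind_config = BindConfig(
        source_address=SocketAddress(address="10.0.0.5", port_value=0)
    )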
- prefix_len: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class BackoffStrategy(betterproto.Message): - """Configuration defining a jittered exponential back off strategy.""" - - # The base interval to be used for the next back off computation. It should - # be greater than zero and less than or equal to :ref:`max_interval - # `. - base_interval: timedelta = betterproto.message_field(1) - # Specifies the maximum interval between retries. This parameter is optional, - # but must be greater than or equal to the :ref:`base_interval - # ` if set. The default - # is 10 times the :ref:`base_interval - # `. - max_interval: timedelta = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class HttpUri(betterproto.Message): - """Envoy external URI descriptor""" - - # The HTTP server URI. It should be a full FQDN with protocol, host and path. - # Example: .. code-block:: yaml uri: - # https://www.googleapis.com/oauth2/v1/certs - uri: str = betterproto.string_field(1) - # A cluster is created in the Envoy "cluster_manager" config section. This - # field specifies the cluster name. Example: .. code-block:: yaml cluster: - # jwks_cluster - cluster: str = betterproto.string_field(2, group="http_upstream_type") - # Sets the maximum duration in milliseconds that a response can take to - # arrive upon request. - timeout: timedelta = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class Locality(betterproto.Message): - """ - Identifies location of where either Envoy runs or where upstream hosts run. - """ - - # Region this :ref:`zone ` belongs to. - region: str = betterproto.string_field(1) - # Defines the local service zone where Envoy is running. Though optional, it - # should be set if discovery service routing is used and the discovery - # service exposes :ref:`zone data - # `, either in this - # message or via :option:`--service-zone`. The meaning of zone is context - # dependent, e.g. `Availability Zone (AZ) - # `_ on AWS, `Zone - # `_ on GCP, etc. - zone: str = betterproto.string_field(2) - # When used for locality of upstream hosts, this field further splits zone - # into smaller chunks of sub-zones so they can be load balanced - # independently. - sub_zone: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class BuildVersion(betterproto.Message): - """ - BuildVersion combines SemVer version of extension with free-form build - information (i.e. 'alpha', 'private-build') as a set of strings. - """ - - # SemVer version of extension. - version: "___type__.SemanticVersion" = betterproto.message_field(1) - # Free-form build information. Envoy defines several well known keys in the - # source/common/version/version.h file - metadata: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Extension(betterproto.Message): - """ - Version and identification for an Envoy extension. [#next-free-field: 6] - """ - - # This is the name of the Envoy filter as specified in the Envoy - # configuration, e.g. envoy.filters.http.router, com.acme.widget. - name: str = betterproto.string_field(1) - # Category of the extension. Extension category names use reverse DNS - # notation. For instance "envoy.filters.listener" for Envoy's built-in - # listener filters or "com.acme.filters.http" for HTTP filters from acme.com - # vendor. [#comment:TODO(yanavlasov): Link to the doc with existing envoy - # category names.] 
- category: str = betterproto.string_field(2) - # [#not-implemented-hide:] Type descriptor of extension configuration proto. - # [#comment:TODO(yanavlasov): Link to the doc with existing configuration - # protos.] [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] - type_descriptor: str = betterproto.string_field(3) - # The version is a property of the extension and maintained independently of - # other extensions and the Envoy API. This field is not set when extension - # did not provide version information. - version: "BuildVersion" = betterproto.message_field(4) - # Indicates that the extension is present but was disabled via dynamic - # configuration. - disabled: bool = betterproto.bool_field(5) - - -@dataclass(eq=False, repr=False) -class Node(betterproto.Message): - """ - Identifies a specific Envoy instance. The node identifier is presented to - the management server, which may use this identifier to distinguish per - Envoy configuration for serving. [#next-free-field: 12] - """ - - # An opaque node identifier for the Envoy node. This also provides the local - # service node name. It should be set if any of the following features are - # used: :ref:`statsd `, :ref:`CDS - # `, and :ref:`HTTP tracing - # `, either in this message or via :option:`--service- - # node`. - id: str = betterproto.string_field(1) - # Defines the local service cluster name where Envoy is running. Though - # optional, it should be set if any of the following features are used: - # :ref:`statsd `, :ref:`health check cluster - # verification - # `, - # :ref:`runtime override directory - # `, :ref:`user agent addition `, :ref:`HTTP global rate limiting - # `, :ref:`CDS `, - # and :ref:`HTTP tracing `, either in this message or - # via :option:`--service-cluster`. - cluster: str = betterproto.string_field(2) - # Opaque metadata extending the node identifier. Envoy will pass this - # directly to the management server. - metadata: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(3) - # Locality specifying where the Envoy instance is running. - locality: "Locality" = betterproto.message_field(4) - # This is motivated by informing a management server during canary which - # version of Envoy is being tested in a heterogeneous fleet. This will be set - # by Envoy in management server RPCs. This field is deprecated in favor of - # the user_agent_name and user_agent_version values. - build_version: str = betterproto.string_field(5) - # Free-form string that identifies the entity requesting config. E.g. "envoy" - # or "grpc" - user_agent_name: str = betterproto.string_field(6) - # Free-form string that identifies the version of the entity requesting - # config. E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" - user_agent_version: str = betterproto.string_field( - 7, group="user_agent_version_type" - ) - # Structured version of the entity requesting config. - user_agent_build_version: "BuildVersion" = betterproto.message_field( - 8, group="user_agent_version_type" - ) - # List of extensions and their versions supported by the node. - extensions: List["Extension"] = betterproto.message_field(9) - # Client feature support list. These are well known features described in the - # Envoy API repository for a given major version of an API. Client features - # use reverse DNS naming scheme, for example `com.acme.feature`. See - # :ref:`the list of features ` that xDS client may support. 
- client_features: List[str] = betterproto.string_field(10) - # Known listening ports on the node as a generic hint to the management - # server for filtering :ref:`listeners ` to be returned. - # For example, if there is a listener bound to port 80, the list can - # optionally contain the SocketAddress `(0.0.0.0,80)`. The field is optional - # and just a hint. - listening_addresses: List["Address"] = betterproto.message_field(11) - - def __post_init__(self) -> None: - super().__post_init__() - if self.build_version: - warnings.warn("Node.build_version is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class Metadata(betterproto.Message): - """ - Metadata provides additional inputs to filters based on matched listeners, - filter chains, routes and endpoints. It is structured as a map, usually - from filter name (in reverse DNS format) to metadata specific to the - filter. Metadata key-values for a filter are merged as connection and - request handling occurs, with later values for the same key overriding - earlier values. An example use of metadata is providing additional values - to http_connection_manager in the envoy.http_connection_manager.access_log - namespace. Another example use of metadata is to per service config info in - cluster metadata, which may get consumed by multiple filters. For load - balancing, Metadata provides a means to subset cluster endpoints. Endpoints - have a Metadata object associated and routes contain a Metadata object to - match against. There are some well defined metadata used today for this - purpose: * ``{"envoy.lb": {"canary": }}`` This indicates the canary - status of an endpoint and is also used during header processing - (x-envoy-upstream-canary) and for stats purposes. [#next-major-version: - move to type/metadata/v2] - """ - - # Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* - # namespace is reserved for Envoy's built-in filters. - filter_metadata: Dict[ - str, "betterproto_lib_google_protobuf.Struct" - ] = betterproto.map_field(1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - - -@dataclass(eq=False, repr=False) -class RuntimeUInt32(betterproto.Message): - """Runtime derived uint32 with a default when not specified.""" - - # Default value if runtime value is not available. - default_value: int = betterproto.uint32_field(2) - # Runtime key to get value for comparison. This value is used if defined. - runtime_key: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class RuntimeDouble(betterproto.Message): - """Runtime derived double with a default when not specified.""" - - # Default value if runtime value is not available. - default_value: float = betterproto.double_field(1) - # Runtime key to get value for comparison. This value is used if defined. - runtime_key: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class RuntimeFeatureFlag(betterproto.Message): - """Runtime derived bool with a default when not specified.""" - - # Default value if runtime value is not available. - default_value: Optional[bool] = betterproto.message_field( - 1, wraps=betterproto.TYPE_BOOL - ) - # Runtime key to get value for comparison. This value is used if defined. The - # boolean value must be represented via its `canonical JSON encoding - # `_. - runtime_key: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class HeaderValue(betterproto.Message): - """Header name/value pair.""" - - # Header name. 
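A sketch of the node identity an xDS client could present using the Node and Locality messages above; module path assumed, identifiers are placeholders:

.. code-block:: python

    from envoy_data_plane.envoy.api.v2.core import Locality, Node

    node = Node(
        id="envoy-placeholder-1",
        cluster="edge-proxy",
        locality=Locality(region="ap-southeast-2", zone="ap-southeast-2a"),
    )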
- key: str = betterproto.string_field(1) - # Header value. The same :ref:`format specifier ` - # as used for :ref:`HTTP access logging ` applies here, - # however unknown header values are replaced with the empty string instead of - # `-`. - value: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class HeaderValueOption(betterproto.Message): - """Header name/value pair plus option to control append behavior.""" - - # Header name/value pair that this option applies to. - header: "HeaderValue" = betterproto.message_field(1) - # Should the value be appended? If true (default), the value is appended to - # existing values. - append: Optional[bool] = betterproto.message_field(2, wraps=betterproto.TYPE_BOOL) - - -@dataclass(eq=False, repr=False) -class HeaderMap(betterproto.Message): - """Wrapper for a set of headers.""" - - headers: List["HeaderValue"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class DataSource(betterproto.Message): - """Data source consisting of either a file or an inline value.""" - - # Local filesystem data source. - filename: str = betterproto.string_field(1, group="specifier") - # Bytes inlined in the configuration. - inline_bytes: bytes = betterproto.bytes_field(2, group="specifier") - # String inlined in the configuration. - inline_string: str = betterproto.string_field(3, group="specifier") - - -@dataclass(eq=False, repr=False) -class RetryPolicy(betterproto.Message): - """ - The message specifies the retry policy of remote data source when fetching - fails. - """ - - # Specifies parameters that control :ref:`retry backoff strategy - # `. This parameter is optional, in which - # case the default base interval is 1000 milliseconds. The default maximum - # interval is 10 times the base interval. - retry_back_off: "BackoffStrategy" = betterproto.message_field(1) - # Specifies the allowed number of retries. This parameter is optional and - # defaults to 1. - num_retries: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class RemoteDataSource(betterproto.Message): - """ - The message specifies how to fetch data from remote and how to verify it. - """ - - # The HTTP URI to fetch the remote data. - http_uri: "HttpUri" = betterproto.message_field(1) - # SHA256 string for verifying data. - sha256: str = betterproto.string_field(2) - # Retry policy for fetching remote data. - retry_policy: "RetryPolicy" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class AsyncDataSource(betterproto.Message): - """Async data source which support async data fetch.""" - - # Local async data source. - local: "DataSource" = betterproto.message_field(1, group="specifier") - # Remote async data source. - remote: "RemoteDataSource" = betterproto.message_field(2, group="specifier") - - -@dataclass(eq=False, repr=False) -class TransportSocket(betterproto.Message): - """ - Configuration for transport socket in :ref:`listeners ` - and :ref:`clusters `. If the configuration is empty, - a default transport socket implementation and configuration will be chosen - based on the platform and existence of tls_context. - """ - - # The name of the transport socket to instantiate. The name must match a - # supported transport socket implementation. 
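A sketch of an async remote data source with a retry policy, composed from the messages above together with the HttpUri and BackoffStrategy messages earlier in this file. Module path assumed; the URI and cluster name come from the HttpUri example comment, and the checksum is a placeholder:

.. code-block:: python

    from datetime import timedelta

    from envoy_data_plane.envoy.api.v2.core import (
        AsyncDataSource,
        BackoffStrategy,
        HttpUri,
        RemoteDataSource,
        RetryPolicy,
    )

    remote_jwks = AsyncDataSource(
        remote=RemoteDataSource(
            http_uri=HttpUri(
                uri="https://www.googleapis.com/oauth2/v1/certs",
                cluster="jwks_cluster",
                timeout=timedelta(seconds=1),
            ),
            sha256="<sha256-of-the-expected-payload>",  # placeholder checksum
            retry_policy=RetryPolicy(
                num_retries=3,
                retry_back_off=BackoffStrategy(
                    base_interval=timedelta(milliseconds=250),
                    max_interval=timedelta(seconds=2),
                ),
            ),
        )
    )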
- name: str = betterproto.string_field(1) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn("TransportSocket.config is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class RuntimeFractionalPercent(betterproto.Message): - """ - Runtime derived FractionalPercent with defaults for when the numerator or - denominator is not specified via a runtime key. .. note:: Parsing of the - runtime key's data is implemented such that it may be represented as a - :ref:`FractionalPercent ` proto - represented as JSON/YAML and may also be represented as an integer with - the assumption that the value is an integral percentage out of 100. For - instance, a runtime key lookup returning the value "42" would parse as a - `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. - """ - - # Default value if the runtime value's for the numerator/denominator keys are - # not available. - default_value: "___type__.FractionalPercent" = betterproto.message_field(1) - # Runtime key for a YAML representation of a FractionalPercent. - runtime_key: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class ControlPlane(betterproto.Message): - """ - Identifies a specific ControlPlane instance that Envoy is connected to. - """ - - # An opaque control plane identifier that uniquely identifies an instance of - # control plane. This can be used to identify which control plane instance, - # the Envoy is connected to. - identifier: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class GrpcService(betterproto.Message): - """ - gRPC service configuration. This is used by :ref:`ApiConfigSource - ` and filter configurations. [#next- - free-field: 6] - """ - - # Envoy's in-built gRPC client. See the :ref:`gRPC services overview - # ` documentation for discussion on gRPC client - # selection. - envoy_grpc: "GrpcServiceEnvoyGrpc" = betterproto.message_field( - 1, group="target_specifier" - ) - # `Google C++ gRPC client `_ See the :ref:`gRPC - # services overview ` documentation for - # discussion on gRPC client selection. - google_grpc: "GrpcServiceGoogleGrpc" = betterproto.message_field( - 2, group="target_specifier" - ) - # The timeout for the gRPC request. This is the timeout for a specific - # request. - timeout: timedelta = betterproto.message_field(3) - # Additional metadata to include in streams initiated to the GrpcService. - # This can be used for scenarios in which additional ad hoc authorization - # headers (e.g. ``x-foo-bar: baz-key``) are to be injected. - initial_metadata: List["HeaderValue"] = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class GrpcServiceEnvoyGrpc(betterproto.Message): - # The name of the upstream gRPC cluster. SSL credentials will be supplied in - # the :ref:`Cluster ` :ref:`transport_socket - # `. - cluster_name: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpc(betterproto.Message): - """[#next-free-field: 7]""" - - # The target URI when using the `Google C++ gRPC client - # `_. SSL credentials will be supplied in - # :ref:`channel_credentials - # `. 
- target_uri: str = betterproto.string_field(1) - channel_credentials: "GrpcServiceGoogleGrpcChannelCredentials" = ( - betterproto.message_field(2) - ) - # A set of call credentials that can be composed with `channel credentials - # `_. - call_credentials: List[ - "GrpcServiceGoogleGrpcCallCredentials" - ] = betterproto.message_field(3) - # The human readable prefix to use when emitting statistics for the gRPC - # service. .. csv-table:: :header: Name, Type, Description :widths: 1, - # 1, 2 streams_total, Counter, Total number of streams opened - # streams_closed_, Counter, Total streams closed with - stat_prefix: str = betterproto.string_field(4) - # The name of the Google gRPC credentials factory to use. This must have been - # registered with Envoy. If this is empty, a default credentials factory will - # be used that sets up channel credentials based on other configuration - # parameters. - credentials_factory_name: str = betterproto.string_field(5) - # Additional configuration for site-specific customizations of the Google - # gRPC library. - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(6) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcSslCredentials(betterproto.Message): - """ - See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. - """ - - # PEM encoded server root certificates. - root_certs: "DataSource" = betterproto.message_field(1) - # PEM encoded client private key. - private_key: "DataSource" = betterproto.message_field(2) - # PEM encoded client certificate chain. - cert_chain: "DataSource" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcGoogleLocalCredentials(betterproto.Message): - """ - Local channel credentials. Only UDS is supported for now. See - https://github.com/grpc/grpc/pull/15909. - """ - - pass - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcChannelCredentials(betterproto.Message): - """ - See https://grpc.io/docs/guides/auth.html#credential-types to understand - Channel and Call credential types. - """ - - ssl_credentials: "GrpcServiceGoogleGrpcSslCredentials" = betterproto.message_field( - 1, group="credential_specifier" - ) - # https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f - # 61 - google_default: "betterproto_lib_google_protobuf.Empty" = betterproto.message_field( - 2, group="credential_specifier" - ) - local_credentials: "GrpcServiceGoogleGrpcGoogleLocalCredentials" = ( - betterproto.message_field(3, group="credential_specifier") - ) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcCallCredentials(betterproto.Message): - """[#next-free-field: 8]""" - - # Access token credentials. https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a8 - # 0da696ffdaea943f0f858d7a360d. - access_token: str = betterproto.string_field(1, group="credential_specifier") - # Google Compute Engine credentials. https://grpc.io/grpc/cpp/namespacegrpc.h - # tml#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - google_compute_engine: "betterproto_lib_google_protobuf.Empty" = ( - betterproto.message_field(2, group="credential_specifier") - ) - # Google refresh token credentials. https://grpc.io/grpc/cpp/namespacegrpc.ht - # ml#a96901c997b91bc6513b08491e0dca37c. - google_refresh_token: str = betterproto.string_field( - 3, group="credential_specifier" - ) - # Service Account JWT Access credentials. https://grpc.io/grpc/cpp/namespaceg - # rpc.html#a92a9f959d6102461f66ee973d8e9d3aa. 
- service_account_jwt_access: "GrpcServiceGoogleGrpcCallCredentialsServiceAccountJwtAccessCredentials" = betterproto.message_field( - 4, group="credential_specifier" - ) - # Google IAM credentials. https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc - # 101b41e680d47028166e76f9d0. - google_iam: "GrpcServiceGoogleGrpcCallCredentialsGoogleIamCredentials" = ( - betterproto.message_field(5, group="credential_specifier") - ) - # Custom authenticator credentials. https://grpc.io/grpc/cpp/namespacegrpc.ht - # ml#a823c6a4b19ffc71fb33e90154ee2ad07. - # https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other- - # authentication-mechanisms. - from_plugin: "GrpcServiceGoogleGrpcCallCredentialsMetadataCredentialsFromPlugin" = ( - betterproto.message_field(6, group="credential_specifier") - ) - # Custom security token service which implements OAuth 2.0 token exchange. - # https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 See - # https://github.com/grpc/grpc/pull/19587. - sts_service: "GrpcServiceGoogleGrpcCallCredentialsStsService" = ( - betterproto.message_field(7, group="credential_specifier") - ) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcCallCredentialsServiceAccountJwtAccessCredentials( - betterproto.Message -): - json_key: str = betterproto.string_field(1) - token_lifetime_seconds: int = betterproto.uint64_field(2) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcCallCredentialsGoogleIamCredentials(betterproto.Message): - authorization_token: str = betterproto.string_field(1) - authority_selector: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcCallCredentialsMetadataCredentialsFromPlugin( - betterproto.Message -): - name: str = betterproto.string_field(1) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn( - "GrpcServiceGoogleGrpcCallCredentialsMetadataCredentialsFromPlugin.config is deprecated", - DeprecationWarning, - ) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcCallCredentialsStsService(betterproto.Message): - """ - Security token service configuration that allows Google gRPC to fetch - security token from an OAuth 2.0 authorization server. See - https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and - https://github.com/grpc/grpc/pull/19587. [#next-free-field: 10] - """ - - # URI of the token exchange service that handles token exchange requests. - # [#comment:TODO(asraa): Add URI validation when implemented. Tracked by - # https://github.com/envoyproxy/protoc-gen-validate/issues/303] - token_exchange_service_uri: str = betterproto.string_field(1) - # Location of the target service or resource where the client intends to use - # the requested security token. - resource: str = betterproto.string_field(2) - # Logical name of the target service where the client intends to use the - # requested security token. - audience: str = betterproto.string_field(3) - # The desired scope of the requested security token in the context of the - # service or resource where the token will be used. - scope: str = betterproto.string_field(4) - # Type of the requested security token. 
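The two gRPC client flavours are selected through the target_specifier oneof shown above; the Envoy-gRPC form is the simpler of the two. A sketch, with module path assumed and a placeholder cluster name:

.. code-block:: python

    from datetime import timedelta

    from envoy_data_plane.envoy.api.v2.core import GrpcService, GrpcServiceEnvoyGrpc

    grpc_service = GrpcService(
        envoy_grpc=GrpcServiceEnvoyGrpc(cluster_name="xds_cluster"),
        timeout=timedelta(seconds=5),
    )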
- requested_token_type: str = betterproto.string_field(5) - # The path of subject token, a security token that represents the identity of - # the party on behalf of whom the request is being made. - subject_token_path: str = betterproto.string_field(6) - # Type of the subject token. - subject_token_type: str = betterproto.string_field(7) - # The path of actor token, a security token that represents the identity of - # the acting party. The acting party is authorized to use the requested - # security token and act on behalf of the subject. - actor_token_path: str = betterproto.string_field(8) - # Type of the actor token. - actor_token_type: str = betterproto.string_field(9) - - -@dataclass(eq=False, repr=False) -class ApiConfigSource(betterproto.Message): - """ - API configuration source. This identifies the API type and cluster that - Envoy will use to fetch an xDS API. [#next-free-field: 9] - """ - - # API type (gRPC, REST, delta gRPC) - api_type: "ApiConfigSourceApiType" = betterproto.enum_field(1) - # API version for xDS transport protocol. This describes the xDS gRPC/REST - # endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - transport_api_version: "ApiVersion" = betterproto.enum_field(8) - # Cluster names should be used only with REST. If > 1 cluster is defined, - # clusters will be cycled through if any kind of failure occurs. .. note:: - # The cluster with name ``cluster_name`` must be statically defined and its - # type must not be ``EDS``. - cluster_names: List[str] = betterproto.string_field(2) - # Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, - # services will be cycled through if any kind of failure occurs. - grpc_services: List["GrpcService"] = betterproto.message_field(4) - # For REST APIs, the delay between successive polls. - refresh_delay: timedelta = betterproto.message_field(3) - # For REST APIs, the request timeout. If not set, a default value of 1s will - # be used. - request_timeout: timedelta = betterproto.message_field(5) - # For GRPC APIs, the rate limit settings. If present, discovery requests made - # by Envoy will be rate limited. - rate_limit_settings: "RateLimitSettings" = betterproto.message_field(6) - # Skip the node identifier in subsequent discovery requests for streaming - # gRPC config types. - set_node_on_first_message_only: bool = betterproto.bool_field(7) - - -@dataclass(eq=False, repr=False) -class AggregatedConfigSource(betterproto.Message): - """ - Aggregated Discovery Service (ADS) options. This is currently empty, but - when set in :ref:`ConfigSource ` can be - used to specify that ADS is to be used. - """ - - pass - - -@dataclass(eq=False, repr=False) -class SelfConfigSource(betterproto.Message): - """ - [#not-implemented-hide:] Self-referencing config source options. This is - currently empty, but when set in :ref:`ConfigSource - ` can be used to specify that other data - can be obtained from the same server. - """ - - # API version for xDS transport protocol. This describes the xDS gRPC/REST - # endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - transport_api_version: "ApiVersion" = betterproto.enum_field(1) - - -@dataclass(eq=False, repr=False) -class RateLimitSettings(betterproto.Message): - """ - Rate Limit settings to be applied for discovery requests made by Envoy. - """ - - # Maximum number of tokens to be used for rate limiting discovery request - # calls. If not set, a default value of 100 will be used. 
- max_tokens: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # Rate at which tokens will be filled per second. If not set, a default fill - # rate of 10 tokens per second will be used. - fill_rate: Optional[float] = betterproto.message_field( - 2, wraps=betterproto.TYPE_DOUBLE - ) - - -@dataclass(eq=False, repr=False) -class ConfigSource(betterproto.Message): - """ - Configuration for :ref:`listeners `, :ref:`clusters - `, :ref:`routes - `, :ref:`endpoints - ` etc. may either be sourced from the - filesystem or from an xDS API source. Filesystem configs are watched with - inotify for updates. [#next-free-field: 7] - """ - - # Path on the filesystem to source and watch for configuration updates. When - # sourcing configuration for :ref:`secret `, the - # certificate and key files are also watched for updates. .. note:: The path - # to the source must exist at config load time. .. note:: Envoy will only - # watch the file path for *moves.* This is because in general only moves - # are atomic. The same method of swapping files as is demonstrated in the - # :ref:`runtime documentation ` can be - # used here also. - path: str = betterproto.string_field(1, group="config_source_specifier") - # API configuration source. - api_config_source: "ApiConfigSource" = betterproto.message_field( - 2, group="config_source_specifier" - ) - # When set, ADS will be used to fetch resources. The ADS API configuration - # source in the bootstrap configuration is used. - ads: "AggregatedConfigSource" = betterproto.message_field( - 3, group="config_source_specifier" - ) - # [#not-implemented-hide:] When set, the client will access the resources - # from the same server it got the ConfigSource from, although not necessarily - # from the same stream. This is similar to the - # :ref:`ads` field, except that the client - # may use a different stream to the same server. As a result, this field can - # be used for things like LRS that cannot be sent on an ADS stream. It can - # also be used to link from (e.g.) LDS to RDS on the same server without - # requiring the management server to know its name or required credentials. - # [#next-major-version: In xDS v3, consider replacing the ads field with this - # one, since this field can implicitly mean to use the same stream in the - # case where the ConfigSource is provided via ADS and the specified data can - # also be obtained via ADS.] - self: "SelfConfigSource" = betterproto.message_field( - 5, group="config_source_specifier" - ) - # When this timeout is specified, Envoy will wait no longer than the - # specified time for first config response on this xDS subscription during - # the :ref:`initialization process `. After - # reaching the timeout, Envoy will move to the next initialization phase, - # even if the first config is not delivered yet. The timer is activated when - # the xDS API subscription starts, and is disarmed on first config update or - # on error. 0 means no timeout - Envoy will wait indefinitely for the first - # xDS config (unless another timeout applies). The default is 15s. - initial_fetch_timeout: timedelta = betterproto.message_field(4) - # API version for xDS resources. This implies the type URLs that the client - # will request for resources and the resource type that the client will in - # turn expect to be delivered. 
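A sketch tying ApiConfigSource, GrpcService, and RateLimitSettings into a ConfigSource for a gRPC xDS subscription; module path assumed, cluster name is a placeholder, and the numbers mirror the documented defaults:

.. code-block:: python

    from datetime import timedelta

    from envoy_data_plane.envoy.api.v2.core import (
        ApiConfigSource,
        ApiConfigSourceApiType,
        ApiVersion,
        ConfigSource,
        GrpcService,
        GrpcServiceEnvoyGrpc,
        RateLimitSettings,
    )

    config_source = ConfigSource(
        api_config_source=ApiConfigSource(
            api_type=ApiConfigSourceApiType.GRPC,
            transport_api_version=ApiVersion.V3,
            grpc_services=[
                GrpcService(envoy_grpc=GrpcServiceEnvoyGrpc(cluster_name="xds_cluster"))
            ],
            rate_limit_settings=RateLimitSettings(max_tokens=100, fill_rate=10.0),
            set_node_on_first_message_only=True,
        ),
        initial_fetch_timeout=timedelta(seconds=15),
        resource_api_version=ApiVersion.V3,
    )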
- resource_api_version: "ApiVersion" = betterproto.enum_field(6) - - -@dataclass(eq=False, repr=False) -class EventServiceConfig(betterproto.Message): - """ - [#not-implemented-hide:] Configuration of the event reporting service - endpoint. - """ - - # Specifies the gRPC service that hosts the event reporting service. - grpc_service: "GrpcService" = betterproto.message_field( - 1, group="config_source_specifier" - ) - - -@dataclass(eq=False, repr=False) -class HealthCheck(betterproto.Message): - """[#next-free-field: 23]""" - - # The time to wait for a health check response. If the timeout is reached the - # health check attempt will be considered a failure. - timeout: timedelta = betterproto.message_field(1) - # The interval between health checks. - interval: timedelta = betterproto.message_field(2) - # An optional jitter amount in milliseconds. If specified, Envoy will start - # health checking after for a random time in ms between 0 and initial_jitter. - # This only applies to the first health check. - initial_jitter: timedelta = betterproto.message_field(20) - # An optional jitter amount in milliseconds. If specified, during every - # interval Envoy will add interval_jitter to the wait time. - interval_jitter: timedelta = betterproto.message_field(3) - # An optional jitter amount as a percentage of interval_ms. If specified, - # during every interval Envoy will add interval_ms * interval_jitter_percent - # / 100 to the wait time. If interval_jitter_ms and interval_jitter_percent - # are both set, both of them will be used to increase the wait time. - interval_jitter_percent: int = betterproto.uint32_field(18) - # The number of unhealthy health checks required before a host is marked - # unhealthy. Note that for *http* health checking if a host responds with 503 - # this threshold is ignored and the host is considered unhealthy immediately. - unhealthy_threshold: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # The number of healthy health checks required before a host is marked - # healthy. Note that during startup, only a single successful health check is - # required to mark a host healthy. - healthy_threshold: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) - # [#not-implemented-hide:] Non-serving port for health checking. - alt_port: Optional[int] = betterproto.message_field( - 6, wraps=betterproto.TYPE_UINT32 - ) - # Reuse health check connection between health checks. Default is true. - reuse_connection: Optional[bool] = betterproto.message_field( - 7, wraps=betterproto.TYPE_BOOL - ) - # HTTP health check. - http_health_check: "HealthCheckHttpHealthCheck" = betterproto.message_field( - 8, group="health_checker" - ) - # TCP health check. - tcp_health_check: "HealthCheckTcpHealthCheck" = betterproto.message_field( - 9, group="health_checker" - ) - # gRPC health check. - grpc_health_check: "HealthCheckGrpcHealthCheck" = betterproto.message_field( - 11, group="health_checker" - ) - # Custom health check. - custom_health_check: "HealthCheckCustomHealthCheck" = betterproto.message_field( - 13, group="health_checker" - ) - # The "no traffic interval" is a special health check interval that is used - # when a cluster has never had traffic routed to it. This lower interval - # allows cluster information to be kept up to date, without sending a - # potentially large amount of active health checking traffic for no reason. 
- # Once a cluster has been used for traffic routing, Envoy will shift back to - # using the standard health check interval that is defined. Note that this - # interval takes precedence over any other. The default value for "no traffic - # interval" is 60 seconds. - no_traffic_interval: timedelta = betterproto.message_field(12) - # The "unhealthy interval" is a health check interval that is used for hosts - # that are marked as unhealthy. As soon as the host is marked as healthy, - # Envoy will shift back to using the standard health check interval that is - # defined. The default value for "unhealthy interval" is the same as - # "interval". - unhealthy_interval: timedelta = betterproto.message_field(14) - # The "unhealthy edge interval" is a special health check interval that is - # used for the first health check right after a host is marked as unhealthy. - # For subsequent health checks Envoy will shift back to using either - # "unhealthy interval" if present or the standard health check interval that - # is defined. The default value for "unhealthy edge interval" is the same as - # "unhealthy interval". - unhealthy_edge_interval: timedelta = betterproto.message_field(15) - # The "healthy edge interval" is a special health check interval that is used - # for the first health check right after a host is marked as healthy. For - # subsequent health checks Envoy will shift back to using the standard health - # check interval that is defined. The default value for "healthy edge - # interval" is the same as the default interval. - healthy_edge_interval: timedelta = betterproto.message_field(16) - # Specifies the path to the :ref:`health check event log - # `. If empty, no event log will be - # written. - event_log_path: str = betterproto.string_field(17) - # [#not-implemented-hide:] The gRPC service for the health check event - # service. If empty, health check events won't be sent to a remote endpoint. - event_service: "EventServiceConfig" = betterproto.message_field(22) - # If set to true, health check failure events will always be logged. If set - # to false, only the initial health check failure event will be logged. The - # default value is false. - always_log_health_check_failures: bool = betterproto.bool_field(19) - # This allows overriding the cluster TLS settings, just for health check - # connections. - tls_options: "HealthCheckTlsOptions" = betterproto.message_field(21) - - -@dataclass(eq=False, repr=False) -class HealthCheckPayload(betterproto.Message): - """Describes the encoding of the payload bytes in the payload.""" - - # Hex encoded payload. E.g., "000000FF". - text: str = betterproto.string_field(1, group="payload") - # [#not-implemented-hide:] Binary payload. - binary: bytes = betterproto.bytes_field(2, group="payload") - - -@dataclass(eq=False, repr=False) -class HealthCheckHttpHealthCheck(betterproto.Message): - """[#next-free-field: 12]""" - - # The value of the host header in the HTTP health check request. If left - # empty (default value), the name of the cluster this health check is - # associated with will be used. The host header can be customized for a - # specific endpoint by setting the :ref:`hostname - # ` field. - host: str = betterproto.string_field(1) - # Specifies the HTTP path that will be requested during health checking. For - # example */healthcheck*. - path: str = betterproto.string_field(2) - # [#not-implemented-hide:] HTTP specific payload. 
- send: "HealthCheckPayload" = betterproto.message_field(3) - # [#not-implemented-hide:] HTTP specific response. - receive: "HealthCheckPayload" = betterproto.message_field(4) - # An optional service name parameter which is used to validate the identity - # of the health checked cluster. See the :ref:`architecture overview - # ` for more information. .. - # attention:: This field has been deprecated in favor of - # `service_name_matcher` for better flexibility over matching with service- - # cluster name. - service_name: str = betterproto.string_field(5) - # Specifies a list of HTTP headers that should be added to each request that - # is sent to the health checked cluster. For more information, including - # details on header value syntax, see the documentation on :ref:`custom - # request headers `. - request_headers_to_add: List["HeaderValueOption"] = betterproto.message_field(6) - # Specifies a list of HTTP headers that should be removed from each request - # that is sent to the health checked cluster. - request_headers_to_remove: List[str] = betterproto.string_field(8) - # If set, health checks will be made using http/2. Deprecated, use - # :ref:`codec_client_type - # ` - # instead. - use_http2: bool = betterproto.bool_field(7) - # Specifies a list of HTTP response statuses considered healthy. If provided, - # replaces default 200-only policy - 200 must be included explicitly as - # needed. Ranges follow half-open semantics of :ref:`Int64Range - # `. The start and end of each range are - # required. Only statuses in the range [100, 600) are allowed. - expected_statuses: List["___type__.Int64Range"] = betterproto.message_field(9) - # Use specified application protocol for health checks. - codec_client_type: "___type__.CodecClientType" = betterproto.enum_field(10) - # An optional service name parameter which is used to validate the identity - # of the health checked cluster using a :ref:`StringMatcher - # `. See the :ref:`architecture - # overview ` for more information. - service_name_matcher: "___type_matcher__.StringMatcher" = betterproto.message_field( - 11 - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.service_name: - warnings.warn( - "HealthCheckHttpHealthCheck.service_name is deprecated", - DeprecationWarning, - ) - if self.use_http2: - warnings.warn( - "HealthCheckHttpHealthCheck.use_http2 is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class HealthCheckTcpHealthCheck(betterproto.Message): - # Empty payloads imply a connect-only health check. - send: "HealthCheckPayload" = betterproto.message_field(1) - # When checking the response, “fuzzy” matching is performed such that each - # binary block must be found, and in the order specified, but not necessarily - # contiguous. - receive: List["HealthCheckPayload"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class HealthCheckRedisHealthCheck(betterproto.Message): - # If set, optionally perform ``EXISTS `` instead of ``PING``. A return - # value from Redis of 0 (does not exist) is considered a passing healthcheck. - # A return value other than 0 is considered a failure. This allows the user - # to mark a Redis instance for maintenance by setting the specified key to - # any value and waiting for traffic to drain. - key: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class HealthCheckGrpcHealthCheck(betterproto.Message): - """ - `grpc.health.v1.Health `_-based healthcheck. See `gRPC doc - `_ for - details. 
- """ - - # An optional service name parameter which will be sent to gRPC service in - # `grpc.health.v1.HealthCheckRequest `_. message. See `gRPC health- - # checking overview `_ for more information. - service_name: str = betterproto.string_field(1) - # The value of the :authority header in the gRPC health check request. If - # left empty (default value), the name of the cluster this health check is - # associated with will be used. The authority header can be customized for a - # specific endpoint by setting the :ref:`hostname - # ` field. - authority: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class HealthCheckCustomHealthCheck(betterproto.Message): - """Custom health check.""" - - # The registered name of the custom health checker. - name: str = betterproto.string_field(1) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn( - "HealthCheckCustomHealthCheck.config is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class HealthCheckTlsOptions(betterproto.Message): - """ - Health checks occur over the transport socket specified for the cluster. - This implies that if a cluster is using a TLS-enabled transport socket, the - health check will also occur over TLS. This allows overriding the cluster - TLS settings, just for health check connections. - """ - - # Specifies the ALPN protocols for health check connections. This is useful - # if the corresponding upstream is using ALPN-based :ref:`FilterChainMatch - # ` along with different protocols - # for health checks versus data connections. If empty, no ALPN protocols will - # be set on health check connections. - alpn_protocols: List[str] = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class TcpProtocolOptions(betterproto.Message): - """[#not-implemented-hide:]""" - - pass - - -@dataclass(eq=False, repr=False) -class UpstreamHttpProtocolOptions(betterproto.Message): - # Set transport socket `SNI - # `_ for new upstream - # connections based on the downstream HTTP host/authority header, as seen by - # the :ref:`router filter `. - auto_sni: bool = betterproto.bool_field(1) - # Automatic validate upstream presented certificate for new upstream - # connections based on the downstream HTTP host/authority header, as seen by - # the :ref:`router filter `. This field is - # intended to set with `auto_sni` field. - auto_san_validation: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class HttpProtocolOptions(betterproto.Message): - """[#next-free-field: 6]""" - - # The idle timeout for connections. The idle timeout is defined as the period - # in which there are no active requests. When the idle timeout is reached the - # connection will be closed. If the connection is an HTTP/2 downstream - # connection a drain sequence will occur prior to closing the connection, see - # :ref:`drain_timeout `. Note that request based - # timeouts mean that HTTP/2 PINGs will not keep the connection alive. If not - # specified, this defaults to 1 hour. To disable idle timeouts explicitly set - # this to 0. .. warning:: Disabling this timeout has a highly likelihood of - # yielding connection leaks due to lost TCP FIN packets, etc. 
- idle_timeout: timedelta = betterproto.message_field(1) - # The maximum duration of a connection. The duration is defined as a period - # since a connection was established. If not set, there is no max duration. - # When max_connection_duration is reached the connection will be closed. - # Drain sequence will occur prior to closing the connection if it's - # applicable. See :ref:`drain_timeout `. Note: not - # implemented for upstream connections. - max_connection_duration: timedelta = betterproto.message_field(3) - # The maximum number of headers. If unconfigured, the default maximum number - # of request headers allowed is 100. Requests that exceed this limit will - # receive a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. - max_headers_count: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # Total duration to keep alive an HTTP request/response stream. If the time - # limit is reached the stream will be reset independent of any other - # timeouts. If not specified, this value is not set. - max_stream_duration: timedelta = betterproto.message_field(4) - # Action to take when a client request with a header name containing - # underscore characters is received. If this setting is not specified, the - # value defaults to ALLOW. Note: upstream responses are not affected by this - # setting. - headers_with_underscores_action: "HttpProtocolOptionsHeadersWithUnderscoresAction" = betterproto.enum_field( - 5 - ) - - -@dataclass(eq=False, repr=False) -class Http1ProtocolOptions(betterproto.Message): - """[#next-free-field: 6]""" - - # Handle HTTP requests with absolute URLs in the requests. These requests are - # generally sent by clients to forward/explicit proxies. This allows clients - # to configure envoy as their HTTP proxy. In Unix, for example, this is - # typically done by setting the *http_proxy* environment variable. - allow_absolute_url: Optional[bool] = betterproto.message_field( - 1, wraps=betterproto.TYPE_BOOL - ) - # Handle incoming HTTP/1.0 and HTTP 0.9 requests. This is off by default, and - # not fully standards compliant. There is support for pre-HTTP/1.1 style - # connect logic, dechunking, and handling lack of client host iff - # *default_host_for_http_10* is configured. - accept_http_10: bool = betterproto.bool_field(2) - # A default host for HTTP/1.0 requests. This is highly suggested if - # *accept_http_10* is true as Envoy does not otherwise support HTTP/1.0 - # without a Host header. This is a no-op if *accept_http_10* is not true. - default_host_for_http_10: str = betterproto.string_field(3) - # Describes how the keys for response headers should be formatted. By - # default, all header keys are lower cased. - header_key_format: "Http1ProtocolOptionsHeaderKeyFormat" = ( - betterproto.message_field(4) - ) - # Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied - # trailers. .. attention:: Note that this only happens when Envoy is chunk - # encoding which occurs when: - The request is HTTP/1.1. - Is neither a - # HEAD only request nor a HTTP Upgrade. - Not a response to a HEAD request. - # - The content length header is not present. - enable_trailers: bool = betterproto.bool_field(5) - - -@dataclass(eq=False, repr=False) -class Http1ProtocolOptionsHeaderKeyFormat(betterproto.Message): - # Formats the header by proper casing words: the first character and any - # character following a special character will be capitalized if it's an - # alpha character. 
For example, "content-type" becomes "Content-Type", and - # "foo$b#$are" becomes "Foo$B#$Are". Note that while this results in most - # headers following conventional casing, certain headers are not covered. For - # example, the "TE" header will be formatted as "Te". - proper_case_words: "Http1ProtocolOptionsHeaderKeyFormatProperCaseWords" = ( - betterproto.message_field(1, group="header_format") - ) - - -@dataclass(eq=False, repr=False) -class Http1ProtocolOptionsHeaderKeyFormatProperCaseWords(betterproto.Message): - pass - - -@dataclass(eq=False, repr=False) -class Http2ProtocolOptions(betterproto.Message): - """[#next-free-field: 14]""" - - # `Maximum table size - # `_ (in octets) that - # the encoder is permitted to use for the dynamic HPACK table. Valid values - # range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively - # disables header compression. - hpack_table_size: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # `Maximum concurrent streams - # `_ allowed for - # peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 - # (2^31 - 1) and defaults to 2147483647. For upstream connections, this also - # limits how many streams Envoy will initiate concurrently on a single - # connection. If the limit is reached, Envoy may queue requests or establish - # additional connections (as allowed per circuit breaker limits). - max_concurrent_streams: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # `Initial stream-level flow-control window - # `_ size. Valid - # values range from 65535 (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, - # HTTP/2 maximum) and defaults to 268435456 (256 * 1024 * 1024). NOTE: 65535 - # is the initial window size from HTTP/2 spec. We only support increasing the - # default window size now, so it's also the minimum. This field also acts as - # a soft limit on the number of bytes Envoy will buffer per-stream in the - # HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark - # callbacks will fire to stop the flow of data to the codec buffers. - initial_stream_window_size: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - # Similar to *initial_stream_window_size*, but for connection-level flow- - # control window. Currently, this has the same minimum/maximum/default as - # *initial_stream_window_size*. - initial_connection_window_size: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # Allows proxying Websocket and other upgrades over H2 connect. - allow_connect: bool = betterproto.bool_field(5) - # [#not-implemented-hide:] Hiding until envoy has full metadata support. - # Still under implementation. DO NOT USE. Allows metadata. See [metadata docs - # ](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) - # for more information. - allow_metadata: bool = betterproto.bool_field(6) - # Limit the number of pending outbound downstream frames of all types (frames - # that are waiting to be written into the socket). Exceeding this limit - # triggers flood mitigation and connection is terminated. The - # ``http2.outbound_flood`` stat tracks the number of terminated connections - # due to flood mitigation. The default limit is 10000. [#comment:TODO: - # implement same limits for upstream outbound frames as well.] 
- max_outbound_frames: Optional[int] = betterproto.message_field( - 7, wraps=betterproto.TYPE_UINT32 - ) - # Limit the number of pending outbound downstream frames of types PING, - # SETTINGS and RST_STREAM, preventing high memory utilization when receiving - # continuous stream of these frames. Exceeding this limit triggers flood - # mitigation and connection is terminated. The - # ``http2.outbound_control_flood`` stat tracks the number of terminated - # connections due to flood mitigation. The default limit is 1000. - # [#comment:TODO: implement same limits for upstream outbound frames as - # well.] - max_outbound_control_frames: Optional[int] = betterproto.message_field( - 8, wraps=betterproto.TYPE_UINT32 - ) - # Limit the number of consecutive inbound frames of types HEADERS, - # CONTINUATION and DATA with an empty payload and no end stream flag. Those - # frames have no legitimate use and are abusive, but might be a result of a - # broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood`` stat - # tracks the number of connections terminated due to flood mitigation. - # Setting this to 0 will terminate connection upon receiving first frame with - # an empty payload and no end stream flag. The default limit is 1. - # [#comment:TODO: implement same limits for upstream inbound frames as well.] - max_consecutive_inbound_frames_with_empty_payload: Optional[ - int - ] = betterproto.message_field(9, wraps=betterproto.TYPE_UINT32) - # Limit the number of inbound PRIORITY frames allowed per each opened stream. - # If the number of PRIORITY frames received over the lifetime of connection - # exceeds the value calculated using this formula:: - # max_inbound_priority_frames_per_stream * (1 + inbound_streams) the - # connection is terminated. The ``http2.inbound_priority_frames_flood`` stat - # tracks the number of connections terminated due to flood mitigation. The - # default limit is 100. [#comment:TODO: implement same limits for upstream - # inbound frames as well.] - max_inbound_priority_frames_per_stream: Optional[int] = betterproto.message_field( - 10, wraps=betterproto.TYPE_UINT32 - ) - # Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame - # sent. If the number of WINDOW_UPDATE frames received over the lifetime of - # connection exceeds the value calculated using this formula:: 1 + 2 * - # (inbound_streams + - # max_inbound_window_update_frames_per_data_frame_sent * - # outbound_data_frames) the connection is terminated. The - # ``http2.inbound_priority_frames_flood`` stat tracks the number of - # connections terminated due to flood mitigation. The default limit is 10. - # Setting this to 1 should be enough to support HTTP/2 implementations with - # basic flow control, but more complex implementations that try to estimate - # available bandwidth require at least 2. [#comment:TODO: implement same - # limits for upstream inbound frames as well.] - max_inbound_window_update_frames_per_data_frame_sent: Optional[ - int - ] = betterproto.message_field(11, wraps=betterproto.TYPE_UINT32) - # Allows invalid HTTP messaging and headers. When this option is disabled - # (default), then the whole HTTP/2 connection is terminated upon receiving - # invalid HEADERS frame. However, when this option is enabled, only the - # offending stream is terminated. See `RFC7540, sec. 8.1 - # `_ for details. 
- stream_error_on_invalid_http_messaging: bool = betterproto.bool_field(12) - # [#not-implemented-hide:] Specifies SETTINGS frame parameters to be sent to - # the peer, with two exceptions: 1. SETTINGS_ENABLE_PUSH (0x2) is not - # configurable as HTTP/2 server push is not supported by Envoy. 2. - # SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the - # named field 'allow_connect'. Note that custom parameters specified through - # this field can not also be set in the corresponding named parameters: .. - # code-block:: text ID Field Name ---------------- 0x1 - # hpack_table_size 0x3 max_concurrent_streams 0x4 - # initial_stream_window_size Collisions will trigger config validation - # failure on load/update. Likewise, inconsistencies between custom parameters - # with the same identifier will trigger a failure. See `IANA HTTP/2 Settings - # `_ for standardized identifiers. - custom_settings_parameters: List[ - "Http2ProtocolOptionsSettingsParameter" - ] = betterproto.message_field(13) - - -@dataclass(eq=False, repr=False) -class Http2ProtocolOptionsSettingsParameter(betterproto.Message): - """ - Defines a parameter to be sent in the SETTINGS frame. See `RFC7540, sec. - 6.5.1 `_ for details. - """ - - # The 16 bit parameter identifier. - identifier: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # The 32 bit parameter value. - value: Optional[int] = betterproto.message_field(2, wraps=betterproto.TYPE_UINT32) - - -@dataclass(eq=False, repr=False) -class GrpcProtocolOptions(betterproto.Message): - """[#not-implemented-hide:]""" - - http2_protocol_options: "Http2ProtocolOptions" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class GrpcMethodList(betterproto.Message): - """ - A list of gRPC methods which can be used as an allowlist, for example. - """ - - services: List["GrpcMethodListService"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class GrpcMethodListService(betterproto.Message): - # The name of the gRPC service. - name: str = betterproto.string_field(1) - # The names of the gRPC methods in this service. - method_names: List[str] = betterproto.string_field(2) - - -from .... import type as ___type__ -from ....type import matcher as ___type_matcher__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/api/v2/endpoint/__init__.py b/src/envoy_data_plane/envoy/api/v2/endpoint/__init__.py deleted file mode 100644 index 605e71f..0000000 --- a/src/envoy_data_plane/envoy/api/v2/endpoint/__init__.py +++ /dev/null @@ -1,261 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/api/v2/endpoint/endpoint.proto, envoy/api/v2/endpoint/endpoint_components.proto, envoy/api/v2/endpoint/load_report.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Endpoint(betterproto.Message): - """Upstream host identifier.""" - - # The upstream host address. .. attention:: The form of host address - # depends on the given cluster type. For STATIC or EDS, it is expected to - # be a direct IP address (or something resolvable by the specified - # :ref:`resolver ` in the - # Address). For LOGICAL or STRICT DNS, it is expected to be hostname, and - # will be resolved via DNS. 
- address: "_core__.Address" = betterproto.message_field(1) - # The optional health check configuration is used as configuration for the - # health checker to contact the health checked host. .. attention:: This - # takes into effect only for upstream clusters with :ref:`active health - # checking ` enabled. - health_check_config: "EndpointHealthCheckConfig" = betterproto.message_field(2) - # The hostname associated with this endpoint. This hostname is not used for - # routing or address resolution. If provided, it will be associated with the - # endpoint, and can be used for features that require a hostname, like - # :ref:`auto_host_rewrite - # `. - hostname: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class EndpointHealthCheckConfig(betterproto.Message): - """The optional health check configuration.""" - - # Optional alternative health check port value. By default the health check - # address port of an upstream host is the same as the host's serving address - # port. This provides an alternative health check port. Setting this with a - # non-zero value allows an upstream host to have different health check - # address port. - port_value: int = betterproto.uint32_field(1) - # By default, the host header for L7 health checks is controlled by cluster - # level configuration (see: :ref:`host - # ` and - # :ref:`authority - # `). Setting - # this to a non-empty value allows overriding the cluster level configuration - # for a specific endpoint. - hostname: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class LbEndpoint(betterproto.Message): - """An Endpoint that Envoy can route traffic to. [#next-free-field: 6]""" - - endpoint: "Endpoint" = betterproto.message_field(1, group="host_identifier") - # [#not-implemented-hide:] - endpoint_name: str = betterproto.string_field(5, group="host_identifier") - # Optional health status when known and supplied by EDS server. - health_status: "_core__.HealthStatus" = betterproto.enum_field(2) - # The endpoint metadata specifies values that may be used by the load - # balancer to select endpoints in a cluster for a given request. The filter - # name should be specified as *envoy.lb*. An example boolean key-value pair - # is *canary*, providing the optional canary status of the upstream host. - # This may be matched against in a route's :ref:`RouteAction - # ` metadata_match field to subset the - # endpoints considered in cluster load balancing. - metadata: "_core__.Metadata" = betterproto.message_field(3) - # The optional load balancing weight of the upstream host; at least 1. Envoy - # uses the load balancing weight in some of the built in load balancers. The - # load balancing weight for an endpoint is divided by the sum of the weights - # of all endpoints in the endpoint's locality to produce a percentage of - # traffic for the endpoint. This percentage is then further weighted by the - # endpoint's locality's load balancing weight from LocalityLbEndpoints. If - # unspecified, each host is presumed to have equal weight in a locality. The - # sum of the weights of all endpoints in the endpoint's locality must not - # exceed uint32_t maximal value (4294967295). - load_balancing_weight: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class LocalityLbEndpoints(betterproto.Message): - """ - A group of endpoints belonging to a Locality. 
One can have multiple - LocalityLbEndpoints for a locality, but this is generally only done if the - different groups need to have different load balancing weights or different - priorities. [#next-free-field: 7] - """ - - # Identifies location of where the upstream hosts run. - locality: "_core__.Locality" = betterproto.message_field(1) - # The group of endpoints belonging to the locality specified. - lb_endpoints: List["LbEndpoint"] = betterproto.message_field(2) - # Optional: Per priority/region/zone/sub_zone weight; at least 1. The load - # balancing weight for a locality is divided by the sum of the weights of all - # localities at the same priority level to produce the effective percentage - # of traffic for the locality. The sum of the weights of all localities at - # the same priority level must not exceed uint32_t maximal value - # (4294967295). Locality weights are only considered when :ref:`locality - # weighted load balancing - # ` is configured. These - # weights are ignored otherwise. If no weights are specified when locality - # weighted load balancing is enabled, the locality is assigned no load. - load_balancing_weight: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - # Optional: the priority for this LocalityLbEndpoints. If unspecified this - # will default to the highest priority (0). Under usual circumstances, Envoy - # will only select endpoints for the highest priority (0). In the event all - # endpoints for a particular priority are unavailable/unhealthy, Envoy will - # fail over to selecting endpoints for the next highest priority group. - # Priorities should range from 0 (highest) to N (lowest) without skipping. - priority: int = betterproto.uint32_field(5) - # Optional: Per locality proximity value which indicates how close this - # locality is from the source locality. This value only provides ordering - # information (lower the value, closer it is to the source locality). This - # will be consumed by load balancing schemes that need proximity order to - # determine where to route the requests. [#not-implemented-hide:] - proximity: Optional[int] = betterproto.message_field( - 6, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class UpstreamLocalityStats(betterproto.Message): - """ - These are stats Envoy reports to GLB every so often. Report frequency is - defined by :ref:`LoadStatsResponse.load_reporting_interval`. Stats - per upstream region/zone and optionally per subzone. [#not-implemented- - hide:] Not configuration. TBD how to doc proto APIs. [#next-free-field: 9] - """ - - # Name of zone, region and optionally endpoint group these metrics were - # collected from. Zone and region names could be empty if unknown. - locality: "_core__.Locality" = betterproto.message_field(1) - # The total number of requests successfully completed by the endpoints in the - # locality. - total_successful_requests: int = betterproto.uint64_field(2) - # The total number of unfinished requests - total_requests_in_progress: int = betterproto.uint64_field(3) - # The total number of requests that failed due to errors at the endpoint, - # aggregated over all endpoints in the locality. - total_error_requests: int = betterproto.uint64_field(4) - # The total number of requests that were issued by this Envoy since the last - # report. This information is aggregated over all the upstream endpoints in - # the locality. - total_issued_requests: int = betterproto.uint64_field(8) - # Stats for multi-dimensional load balancing. 
- load_metric_stats: List["EndpointLoadMetricStats"] = betterproto.message_field(5) - # Endpoint granularity stats information for this locality. This information - # is populated if the Server requests it by setting :ref:`LoadStatsResponse.r - # eport_endpoint_granularity`. - upstream_endpoint_stats: List["UpstreamEndpointStats"] = betterproto.message_field( - 7 - ) - # [#not-implemented-hide:] The priority of the endpoint group these metrics - # were collected from. - priority: int = betterproto.uint32_field(6) - - -@dataclass(eq=False, repr=False) -class UpstreamEndpointStats(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. - [#next-free-field: 8] - """ - - # Upstream host address. - address: "_core__.Address" = betterproto.message_field(1) - # Opaque and implementation dependent metadata of the endpoint. Envoy will - # pass this directly to the management server. - metadata: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(6) - # The total number of requests successfully completed by the endpoints in the - # locality. These include non-5xx responses for HTTP, where errors originate - # at the client and the endpoint responded successfully. For gRPC, the grpc- - # status values are those not covered by total_error_requests below. - total_successful_requests: int = betterproto.uint64_field(2) - # The total number of unfinished requests for this endpoint. - total_requests_in_progress: int = betterproto.uint64_field(3) - # The total number of requests that failed due to errors at the endpoint. For - # HTTP these are responses with 5xx status codes and for gRPC the grpc-status - # values: - DeadlineExceeded - Unimplemented - Internal - Unavailable - # - Unknown - DataLoss - total_error_requests: int = betterproto.uint64_field(4) - # The total number of requests that were issued to this endpoint since the - # last report. A single TCP connection, HTTP or gRPC request or stream is - # counted as one request. - total_issued_requests: int = betterproto.uint64_field(7) - # Stats for multi-dimensional load balancing. - load_metric_stats: List["EndpointLoadMetricStats"] = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class EndpointLoadMetricStats(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. - """ - - # Name of the metric; may be empty. - metric_name: str = betterproto.string_field(1) - # Number of calls that finished and included this metric. - num_requests_finished_with_metric: int = betterproto.uint64_field(2) - # Sum of metric values across all calls that finished with this metric for - # load_reporting_interval. - total_metric_value: float = betterproto.double_field(3) - - -@dataclass(eq=False, repr=False) -class ClusterStats(betterproto.Message): - """ - Per cluster load stats. Envoy reports these stats a management server in a - :ref:`LoadStatsRequest` [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. - Next ID: 7 [#next-free-field: 7] - """ - - # The name of the cluster. - cluster_name: str = betterproto.string_field(1) - # The eds_cluster_config service_name of the cluster. It's possible that two - # clusters send the same service_name to EDS, in that case, the management - # server is supposed to do aggregation on the load reports. - cluster_service_name: str = betterproto.string_field(6) - # Need at least one. 
- upstream_locality_stats: List["UpstreamLocalityStats"] = betterproto.message_field( - 2 - ) - # Cluster-level stats such as total_successful_requests may be computed by - # summing upstream_locality_stats. In addition, below there are additional - # cluster-wide stats. The total number of dropped requests. This covers - # requests deliberately dropped by the drop_overload policy and circuit - # breaking. - total_dropped_requests: int = betterproto.uint64_field(3) - # Information about deliberately dropped requests for each category specified - # in the DropOverload policy. - dropped_requests: List["ClusterStatsDroppedRequests"] = betterproto.message_field(5) - # Period over which the actual load report occurred. This will be guaranteed - # to include every request reported. Due to system load and delays between - # the *LoadStatsRequest* sent from Envoy and the *LoadStatsResponse* message - # sent from the management server, this may be longer than the requested load - # reporting interval in the *LoadStatsResponse*. - load_report_interval: timedelta = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class ClusterStatsDroppedRequests(betterproto.Message): - # Identifier for the policy specifying the drop. - category: str = betterproto.string_field(1) - # Total number of deliberately dropped requests for the category. - dropped_count: int = betterproto.uint64_field(2) - - -from .. import core as _core__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/api/v2/listener/__init__.py b/src/envoy_data_plane/envoy/api/v2/listener/__init__.py deleted file mode 100644 index 486e61a..0000000 --- a/src/envoy_data_plane/envoy/api/v2/listener/__init__.py +++ /dev/null @@ -1,283 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/api/v2/listener/listener.proto, envoy/api/v2/listener/listener_components.proto, envoy/api/v2/listener/quic_config.proto, envoy/api/v2/listener/udp_listener_config.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class FilterChainMatchConnectionSourceType(betterproto.Enum): - ANY = 0 - LOCAL = 1 - EXTERNAL = 2 - - -@dataclass(eq=False, repr=False) -class Filter(betterproto.Message): - # The name of the filter to instantiate. The name must match a - # :ref:`supported filter `. - name: str = betterproto.string_field(1) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 4, group="config_type" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn("Filter.config is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class FilterChainMatch(betterproto.Message): - """ - Specifies the match criteria for selecting a specific filter chain for a - listener. In order for a filter chain to be selected, *ALL* of its criteria - must be fulfilled by the incoming connection, properties of which are set - by the networking stack and/or listener filters. The following order - applies: 1. Destination port. 2. Destination IP address. 3. Server name - (e.g. SNI for TLS protocol), 4. Transport protocol. 5. Application - protocols (e.g. ALPN for TLS protocol). 6. Source type (e.g. 
any, local or - external network). 7. Source IP address. 8. Source port. For criteria that - allow ranges or wildcards, the most specific value in any of the configured - filter chains that matches the incoming connection is going to be used - (e.g. for SNI ``www.example.com`` the most specific match would be - ``www.example.com``, then ``*.example.com``, then ``*.com``, then any - filter chain without ``server_names`` requirements). [#comment: Implemented - rules are kept in the preference order, with deprecated fields listed at - the end, because that's how we want to list them in the docs. - [#comment:TODO(PiotrSikora): Add support for configurable precedence of the - rules] [#next-free-field: 13] - """ - - # Optional destination port to consider when use_original_dst is set on the - # listener in determining a filter chain match. - destination_port: Optional[int] = betterproto.message_field( - 8, wraps=betterproto.TYPE_UINT32 - ) - # If non-empty, an IP address and prefix length to match addresses when the - # listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - prefix_ranges: List["_core__.CidrRange"] = betterproto.message_field(3) - # If non-empty, an IP address and suffix length to match addresses when the - # listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - # [#not-implemented-hide:] - address_suffix: str = betterproto.string_field(4) - # [#not-implemented-hide:] - suffix_len: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) - # Specifies the connection source IP match type. Can be any, local or - # external network. - source_type: "FilterChainMatchConnectionSourceType" = betterproto.enum_field(12) - # The criteria is satisfied if the source IP address of the downstream - # connection is contained in at least one of the specified subnets. If the - # parameter is not specified or the list is empty, the source IP address is - # ignored. - source_prefix_ranges: List["_core__.CidrRange"] = betterproto.message_field(6) - # The criteria is satisfied if the source port of the downstream connection - # is contained in at least one of the specified ports. If the parameter is - # not specified, the source port is ignored. - source_ports: List[int] = betterproto.uint32_field(7) - # If non-empty, a list of server names (e.g. SNI for TLS protocol) to - # consider when determining a filter chain match. Those values will be - # compared against the server names of a new connection, when detected by one - # of the listener filters. The server name will be matched against all - # wildcard domains, i.e. ``www.example.com`` will be first matched against - # ``www.example.com``, then ``*.example.com``, then ``*.com``. Note that - # partial wildcards are not supported, and values like ``*w.example.com`` are - # invalid. .. attention:: See the :ref:`FAQ entry ` - # on how to configure SNI for more information. - server_names: List[str] = betterproto.string_field(11) - # If non-empty, a transport protocol to consider when determining a filter - # chain match. This value will be compared against the transport protocol of - # a new connection, when it's detected by one of the listener filters. - # Suggested values include: * ``raw_buffer`` - default, used when no - # transport protocol is detected, * ``tls`` - set by - # :ref:`envoy.filters.listener.tls_inspector - # ` when TLS protocol is detected. - transport_protocol: str = betterproto.string_field(9) - # If non-empty, a list of application protocols (e.g. 
ALPN for TLS protocol) - # to consider when determining a filter chain match. Those values will be - # compared against the application protocols of a new connection, when - # detected by one of the listener filters. Suggested values include: * - # ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector - # `, * ``h2`` - set by - # :ref:`envoy.filters.listener.tls_inspector - # ` .. attention:: Currently, only - # :ref:`TLS Inspector ` provides - # application protocol detection based on the requested `ALPN - # `_ - # values. However, the use of ALPN is pretty much limited to the HTTP/2 - # traffic on the Internet, and matching on values other than ``h2`` is - # going to lead to a lot of false negatives, unless all connecting clients - # are known to use ALPN. - application_protocols: List[str] = betterproto.string_field(10) - - -@dataclass(eq=False, repr=False) -class FilterChain(betterproto.Message): - """ - A filter chain wraps a set of match criteria, an optional TLS context, a set - of filters, and various other parameters. [#next-free-field: 8] - """ - - # The criteria to use when matching a connection to this filter chain. - filter_chain_match: "FilterChainMatch" = betterproto.message_field(1) - # The TLS context for this filter chain. .. attention:: **This field is - # deprecated**. Use `transport_socket` with name `tls` instead. If both are - # set, `transport_socket` takes priority. - tls_context: "_auth__.DownstreamTlsContext" = betterproto.message_field(2) - # A list of individual network filters that make up the filter chain for - # connections established with the listener. Order matters as the filters are - # processed sequentially as connection events happen. Note: If the filter - # list is empty, the connection will close by default. - filters: List["Filter"] = betterproto.message_field(3) - # Whether the listener should expect a PROXY protocol V1 header on new - # connections. If this option is enabled, the listener will assume that the - # remote address of the connection is the one specified in the header. Some - # load balancers including the AWS ELB support this option. If the option is - # absent or set to false, Envoy will use the physical peer address of the - # connection as the remote address. - use_proxy_proto: Optional[bool] = betterproto.message_field( - 4, wraps=betterproto.TYPE_BOOL - ) - # [#not-implemented-hide:] filter chain metadata. - metadata: "_core__.Metadata" = betterproto.message_field(5) - # Optional custom transport socket implementation to use for downstream - # connections. To set up TLS, set a transport socket with name `tls` and - # :ref:`DownstreamTlsContext ` in - # the `typed_config`. If no transport socket configuration is specified, new - # connections will be set up with plaintext. - transport_socket: "_core__.TransportSocket" = betterproto.message_field(6) - # [#not-implemented-hide:] The unique name (or empty) by which this filter - # chain is known. If no name is provided, Envoy will allocate an internal - # UUID for the filter chain. If the filter chain is to be dynamically updated - # or removed via FCDS a unique name must be provided. - name: str = betterproto.string_field(7) - - def __post_init__(self) -> None: - super().__post_init__() - if self.tls_context: - warnings.warn("FilterChain.tls_context is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class ListenerFilterChainMatchPredicate(betterproto.Message): - """ - Listener filter chain match configuration. 
This is a recursive structure - which allows complex nested match configurations to be built using various - logical operators. Examples: * Matches if the destination port is 3306. .. - code-block:: yaml destination_port_range: start: 3306 end: 3307 * - Matches if the destination port is 3306 or 15000. .. code-block:: yaml - or_match: rules: - destination_port_range: start: 3306 - end: 3307 - destination_port_range: start: 15000 - end: 15001 [#next-free-field: 6] - """ - - # A set that describes a logical OR. If any member of the set matches, the - # match configuration matches. - or_match: "ListenerFilterChainMatchPredicateMatchSet" = betterproto.message_field( - 1, group="rule" - ) - # A set that describes a logical AND. If all members of the set match, the - # match configuration matches. - and_match: "ListenerFilterChainMatchPredicateMatchSet" = betterproto.message_field( - 2, group="rule" - ) - # A negation match. The match configuration will match if the negated match - # condition matches. - not_match: "ListenerFilterChainMatchPredicate" = betterproto.message_field( - 3, group="rule" - ) - # The match configuration will always match. - any_match: bool = betterproto.bool_field(4, group="rule") - # Match destination port. Particularly, the match evaluation must use the - # recovered local port if the owning listener filter is after :ref:`an - # original_dst listener filter `. - destination_port_range: "___type__.Int32Range" = betterproto.message_field( - 5, group="rule" - ) - - -@dataclass(eq=False, repr=False) -class ListenerFilterChainMatchPredicateMatchSet(betterproto.Message): - """A set of match configurations used for logical operations.""" - - # The list of rules that make up the set. - rules: List["ListenerFilterChainMatchPredicate"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ListenerFilter(betterproto.Message): - # The name of the filter to instantiate. The name must match a - # :ref:`supported filter `. - name: str = betterproto.string_field(1) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - # Optional match predicate used to disable the filter. The filter is enabled - # when this field is empty. See :ref:`ListenerFilterChainMatchPredicate - # ` for further - # examples. - filter_disabled: "ListenerFilterChainMatchPredicate" = betterproto.message_field(4) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn("ListenerFilter.config is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class UdpListenerConfig(betterproto.Message): - # Used to look up UDP listener factory, matches "raw_udp_listener" or - # "quic_listener" to create a specific udp listener. If not specified, treat - # as "raw_udp_listener". 
- udp_listener_name: str = betterproto.string_field(1) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn("UdpListenerConfig.config is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class ActiveRawUdpListenerConfig(betterproto.Message): - pass - - -@dataclass(eq=False, repr=False) -class QuicProtocolOptions(betterproto.Message): - """Configuration specific to the QUIC protocol. Next id: 4""" - - # Maximum number of streams that the client can negotiate per connection. 100 - # if not specified. - max_concurrent_streams: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # Maximum number of milliseconds that connection will be alive when there is - # no network activity. 300000ms if not specified. - idle_timeout: timedelta = betterproto.message_field(2) - # Connection timeout in milliseconds before the crypto handshake is finished. - # 20000ms if not specified. - crypto_handshake_timeout: timedelta = betterproto.message_field(3) - - -from .. import auth as _auth__ -from .. import core as _core__ -from .... import type as ___type__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/api/v2/ratelimit/__init__.py b/src/envoy_data_plane/envoy/api/v2/ratelimit/__init__.py deleted file mode 100644 index a557a2f..0000000 --- a/src/envoy_data_plane/envoy/api/v2/ratelimit/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/api/v2/ratelimit/ratelimit.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RateLimitDescriptor(betterproto.Message): - """ - A RateLimitDescriptor is a list of hierarchical entries that are used by - the service to determine the final rate limit key and overall allowed - limit. Here are some examples of how they might be used for the domain - "envoy". .. code-block:: cpp ["authenticated": "false"], - ["remote_address": "10.0.0.1"] What it does: Limits all unauthenticated - traffic for the IP address 10.0.0.1. The configuration supplies a default - limit for the *remote_address* key. If there is a desire to raise the limit - for 10.0.0.1 or block it entirely it can be specified directly in the - configuration. .. code-block:: cpp ["authenticated": "false"], ["path": - "/foo/bar"] What it does: Limits all unauthenticated traffic globally for a - specific path (or prefix if configured that way in the service). .. code- - block:: cpp ["authenticated": "false"], ["path": "/foo/bar"], - ["remote_address": "10.0.0.1"] What it does: Limits unauthenticated traffic - to a specific path for a specific IP address. Like (1) we can raise/block - specific IP addresses if we want with an override configuration. .. code- - block:: cpp ["authenticated": "true"], ["client_id": "foo"] What it does: - Limits all traffic for an authenticated client "foo" .. 
code-block:: cpp - ["authenticated": "true"], ["client_id": "foo"], ["path": "/foo/bar"] What - it does: Limits traffic to a specific path for an authenticated client - "foo" The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent - in 1 request if desired. This enables building complex application - scenarios with a generic backend. - """ - - # Descriptor entries. - entries: List["RateLimitDescriptorEntry"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class RateLimitDescriptorEntry(betterproto.Message): - # Descriptor key. - key: str = betterproto.string_field(1) - # Descriptor value. - value: str = betterproto.string_field(2) diff --git a/src/envoy_data_plane/envoy/api/v2/route/__init__.py b/src/envoy_data_plane/envoy/api/v2/route/__init__.py deleted file mode 100644 index 6b0dbb6..0000000 --- a/src/envoy_data_plane/envoy/api/v2/route/__init__.py +++ /dev/null @@ -1,1532 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/api/v2/route/route.proto, envoy/api/v2/route/route_components.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import Dict, List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class VirtualHostTlsRequirementType(betterproto.Enum): - NONE = 0 - EXTERNAL_ONLY = 1 - ALL = 2 - - -class RouteActionClusterNotFoundResponseCode(betterproto.Enum): - SERVICE_UNAVAILABLE = 0 - NOT_FOUND = 1 - - -class RouteActionInternalRedirectAction(betterproto.Enum): - PASS_THROUGH_INTERNAL_REDIRECT = 0 - HANDLE_INTERNAL_REDIRECT = 1 - - -class RedirectActionRedirectResponseCode(betterproto.Enum): - MOVED_PERMANENTLY = 0 - FOUND = 1 - SEE_OTHER = 2 - TEMPORARY_REDIRECT = 3 - PERMANENT_REDIRECT = 4 - - -@dataclass(eq=False, repr=False) -class VirtualHost(betterproto.Message): - """ - The top level element in the routing configuration is a virtual host. Each - virtual host has a logical name as well as a set of domains that get routed - to it based on the incoming request's host header. This allows a single - listener to service multiple top level domain path trees. Once a virtual - host is selected based on the domain, the routes are processed in order to - see which upstream cluster to route to or whether to perform a redirect. - [#next-free-field: 21] - """ - - # The logical name of the virtual host. This is used when emitting certain - # statistics but is not relevant for routing. - name: str = betterproto.string_field(1) - # A list of domains (host/authority header) that will be matched to this - # virtual host. Wildcard hosts are supported in the suffix or prefix form. - # Domain search order: 1. Exact domain names: ``www.foo.com``. 2. Suffix - # domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. 3. Prefix domain - # wildcards: ``foo.*`` or ``foo-*``. 4. Special wildcard ``*`` matching any - # domain. .. note:: The wildcard will not match the empty string. e.g. - # ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. - # The longest wildcards match first. Only a single virtual host in the - # entire route configuration can match on ``*``. A domain must be unique - # across all virtual hosts or the config will fail to load. Domains cannot - # contain control characters. This is validated by the well_known_regex - # HTTP_HEADER_VALUE. - domains: List[str] = betterproto.string_field(2) - # The list of routes that will be matched, in order, for incoming requests. 
- # The first route that matches will be used. - routes: List["Route"] = betterproto.message_field(3) - # Specifies the type of TLS enforcement the virtual host expects. If this - # option is not specified, there is no TLS requirement for the virtual host. - require_tls: "VirtualHostTlsRequirementType" = betterproto.enum_field(4) - # A list of virtual clusters defined for this virtual host. Virtual clusters - # are used for additional statistics gathering. - virtual_clusters: List["VirtualCluster"] = betterproto.message_field(5) - # Specifies a set of rate limit configurations that will be applied to the - # virtual host. - rate_limits: List["RateLimit"] = betterproto.message_field(6) - # Specifies a list of HTTP headers that should be added to each request - # handled by this virtual host. Headers specified at this level are applied - # after headers from enclosed :ref:`envoy_api_msg_route.Route` and before - # headers from the enclosing :ref:`envoy_api_msg_RouteConfiguration`. For - # more information, including details on header value syntax, see the - # documentation on :ref:`custom request headers - # `. - request_headers_to_add: List[ - "_core__.HeaderValueOption" - ] = betterproto.message_field(7) - # Specifies a list of HTTP headers that should be removed from each request - # handled by this virtual host. - request_headers_to_remove: List[str] = betterproto.string_field(13) - # Specifies a list of HTTP headers that should be added to each response - # handled by this virtual host. Headers specified at this level are applied - # after headers from enclosed :ref:`envoy_api_msg_route.Route` and before - # headers from the enclosing :ref:`envoy_api_msg_RouteConfiguration`. For - # more information, including details on header value syntax, see the - # documentation on :ref:`custom request headers - # `. - response_headers_to_add: List[ - "_core__.HeaderValueOption" - ] = betterproto.message_field(10) - # Specifies a list of HTTP headers that should be removed from each response - # handled by this virtual host. - response_headers_to_remove: List[str] = betterproto.string_field(11) - # Indicates that the virtual host has a CORS policy. - cors: "CorsPolicy" = betterproto.message_field(8) - # The per_filter_config field can be used to provide virtual host-specific - # configurations for filters. The key should match the filter name, such as - # *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field - # is filter specific; see the :ref:`HTTP filter documentation - # ` for if and how it is utilized. - per_filter_config: Dict[ - str, "betterproto_lib_google_protobuf.Struct" - ] = betterproto.map_field(12, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # The per_filter_config field can be used to provide virtual host-specific - # configurations for filters. The key should match the filter name, such as - # *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field - # is filter specific; see the :ref:`HTTP filter documentation - # ` for if and how it is utilized. - typed_per_filter_config: Dict[ - str, "betterproto_lib_google_protobuf.Any" - ] = betterproto.map_field(15, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # Decides whether the :ref:`x-envoy-attempt-count - # ` header should be - # included in the upstream request. 
Setting this option will cause it to - # override any existing header value, so in the case of two Envoys on the - # request path with this option enabled, the upstream will see the attempt - # count as perceived by the second Envoy. Defaults to false. This header is - # unaffected by the :ref:`suppress_envoy_headers ` flag. [#next-major- - # version: rename to include_attempt_count_in_request.] - include_request_attempt_count: bool = betterproto.bool_field(14) - # Decides whether the :ref:`x-envoy-attempt-count - # ` header should be - # included in the downstream response. Setting this option will cause the - # router to override any existing header value, so in the case of two Envoys - # on the request path with this option enabled, the downstream will see the - # attempt count as perceived by the Envoy closest upstream from itself. - # Defaults to false. This header is unaffected by the - # :ref:`suppress_envoy_headers ` flag. - include_attempt_count_in_response: bool = betterproto.bool_field(19) - # Indicates the retry policy for all routes in this virtual host. Note that - # setting a route level entry will take precedence over this config and it'll - # be treated independently (e.g.: values are not inherited). - retry_policy: "RetryPolicy" = betterproto.message_field(16) - # [#not-implemented-hide:] Specifies the configuration for retry policy - # extension. Note that setting a route level entry will take precedence over - # this config and it'll be treated independently (e.g.: values are not - # inherited). :ref:`Retry policy - # ` should not be set if this - # field is used. - retry_policy_typed_config: "betterproto_lib_google_protobuf.Any" = ( - betterproto.message_field(20) - ) - # Indicates the hedge policy for all routes in this virtual host. Note that - # setting a route level entry will take precedence over this config and it'll - # be treated independently (e.g.: values are not inherited). - hedge_policy: "HedgePolicy" = betterproto.message_field(17) - # The maximum bytes which will be buffered for retries and shadowing. If set - # and a route-specific limit is not set, the bytes actually buffered will be - # the minimum value of this and the listener - # per_connection_buffer_limit_bytes. - per_request_buffer_limit_bytes: Optional[int] = betterproto.message_field( - 18, wraps=betterproto.TYPE_UINT32 - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.per_filter_config: - warnings.warn( - "VirtualHost.per_filter_config is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class FilterAction(betterproto.Message): - """A filter-defined action type.""" - - action: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class Route(betterproto.Message): - """ - A route is both a specification of how to match a request as well as an - indication of what to do next (e.g., redirect, forward, rewrite, etc.). .. - attention:: Envoy supports routing on HTTP method via :ref:`header - matching `. [#next-free-field: 18] - """ - - # Name for the route. - name: str = betterproto.string_field(14) - # Route matching parameters. - match: "RouteMatch" = betterproto.message_field(1) - # Route request to some upstream cluster. - route: "RouteAction" = betterproto.message_field(2, group="action") - # Return a redirect. - redirect: "RedirectAction" = betterproto.message_field(3, group="action") - # Return an arbitrary HTTP response directly, without proxying. 
- direct_response: "DirectResponseAction" = betterproto.message_field( - 7, group="action" - ) - # [#not-implemented-hide:] If true, a filter will define the action (e.g., it - # could dynamically generate the RouteAction). - filter_action: "FilterAction" = betterproto.message_field(17, group="action") - # The Metadata field can be used to provide additional information about the - # route. It can be used for configuration, stats, and logging. The metadata - # should go under the filter namespace that will need it. For instance, if - # the metadata is intended for the Router filter, the filter name should be - # specified as *envoy.filters.http.router*. - metadata: "_core__.Metadata" = betterproto.message_field(4) - # Decorator for the matched route. - decorator: "Decorator" = betterproto.message_field(5) - # The per_filter_config field can be used to provide route-specific - # configurations for filters. The key should match the filter name, such as - # *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field - # is filter specific; see the :ref:`HTTP filter documentation - # ` for if and how it is utilized. - per_filter_config: Dict[ - str, "betterproto_lib_google_protobuf.Struct" - ] = betterproto.map_field(8, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # The typed_per_filter_config field can be used to provide route-specific - # configurations for filters. The key should match the filter name, such as - # *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field - # is filter specific; see the :ref:`HTTP filter documentation - # ` for if and how it is utilized. - typed_per_filter_config: Dict[ - str, "betterproto_lib_google_protobuf.Any" - ] = betterproto.map_field(13, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # Specifies a set of headers that will be added to requests matching this - # route. Headers specified at this level are applied before headers from the - # enclosing :ref:`envoy_api_msg_route.VirtualHost` and - # :ref:`envoy_api_msg_RouteConfiguration`. For more information, including - # details on header value syntax, see the documentation on :ref:`custom - # request headers `. - request_headers_to_add: List[ - "_core__.HeaderValueOption" - ] = betterproto.message_field(9) - # Specifies a list of HTTP headers that should be removed from each request - # matching this route. - request_headers_to_remove: List[str] = betterproto.string_field(12) - # Specifies a set of headers that will be added to responses to requests - # matching this route. Headers specified at this level are applied before - # headers from the enclosing :ref:`envoy_api_msg_route.VirtualHost` and - # :ref:`envoy_api_msg_RouteConfiguration`. For more information, including - # details on header value syntax, see the documentation on :ref:`custom - # request headers `. - response_headers_to_add: List[ - "_core__.HeaderValueOption" - ] = betterproto.message_field(10) - # Specifies a list of HTTP headers that should be removed from each response - # to requests matching this route. - response_headers_to_remove: List[str] = betterproto.string_field(11) - # Presence of the object defines whether the connection manager's tracing - # configuration is overridden by this route specific instance. - tracing: "Tracing" = betterproto.message_field(15) - # The maximum bytes which will be buffered for retries and shadowing. If set, - # the bytes actually buffered will be the minimum value of this and the - # listener per_connection_buffer_limit_bytes. 
- per_request_buffer_limit_bytes: Optional[int] = betterproto.message_field( - 16, wraps=betterproto.TYPE_UINT32 - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.per_filter_config: - warnings.warn("Route.per_filter_config is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class WeightedCluster(betterproto.Message): - """ - Compared to the :ref:`cluster ` - field that specifies a single upstream cluster as the target of a request, - the :ref:`weighted_clusters - ` option allows for - specification of multiple upstream clusters along with weights that - indicate the percentage of traffic to be forwarded to each cluster. The - router selects an upstream cluster based on the weights. - """ - - # Specifies one or more upstream clusters associated with the route. - clusters: List["WeightedClusterClusterWeight"] = betterproto.message_field(1) - # Specifies the total weight across all clusters. The sum of all cluster - # weights must equal this value, which must be greater than 0. Defaults to - # 100. - total_weight: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - # Specifies the runtime key prefix that should be used to construct the - # runtime keys associated with each cluster. When the *runtime_key_prefix* is - # specified, the router will look for weights associated with each upstream - # cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where - # *cluster[i]* denotes an entry in the clusters array field. If the runtime - # key for the cluster does not exist, the value specified in the - # configuration file will be used as the default weight. See the - # :ref:`runtime documentation ` for how key names map to - # the underlying implementation. - runtime_key_prefix: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class WeightedClusterClusterWeight(betterproto.Message): - """[#next-free-field: 11]""" - - # Name of the upstream cluster. The cluster must exist in the :ref:`cluster - # manager configuration `. - name: str = betterproto.string_field(1) - # An integer between 0 and :ref:`total_weight - # `. When a request - # matches the route, the choice of an upstream cluster is determined by its - # weight. The sum of weights across all entries in the clusters array must - # add up to the total_weight, which defaults to 100. - weight: Optional[int] = betterproto.message_field(2, wraps=betterproto.TYPE_UINT32) - # Optional endpoint metadata match criteria used by the subset load balancer. - # Only endpoints in the upstream cluster with metadata matching what is set - # in this field will be considered for load balancing. Note that this will be - # merged with what's provided in :ref:`RouteAction.metadata_match - # `, with values here - # taking precedence. The filter name should be specified as *envoy.lb*. - metadata_match: "_core__.Metadata" = betterproto.message_field(3) - # Specifies a list of headers to be added to requests when this cluster is - # selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - # Headers specified at this level are applied before headers from the - # enclosing :ref:`envoy_api_msg_route.Route`, - # :ref:`envoy_api_msg_route.VirtualHost`, and - # :ref:`envoy_api_msg_RouteConfiguration`. For more information, including - # details on header value syntax, see the documentation on :ref:`custom - # request headers `. 
- request_headers_to_add: List[ - "_core__.HeaderValueOption" - ] = betterproto.message_field(4) - # Specifies a list of HTTP headers that should be removed from each request - # when this cluster is selected through the enclosing - # :ref:`envoy_api_msg_route.RouteAction`. - request_headers_to_remove: List[str] = betterproto.string_field(9) - # Specifies a list of headers to be added to responses when this cluster is - # selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - # Headers specified at this level are applied before headers from the - # enclosing :ref:`envoy_api_msg_route.Route`, - # :ref:`envoy_api_msg_route.VirtualHost`, and - # :ref:`envoy_api_msg_RouteConfiguration`. For more information, including - # details on header value syntax, see the documentation on :ref:`custom - # request headers `. - response_headers_to_add: List[ - "_core__.HeaderValueOption" - ] = betterproto.message_field(5) - # Specifies a list of headers to be removed from responses when this cluster - # is selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - response_headers_to_remove: List[str] = betterproto.string_field(6) - # The per_filter_config field can be used to provide weighted cluster- - # specific configurations for filters. The key should match the filter name, - # such as *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this - # field is filter specific; see the :ref:`HTTP filter documentation - # ` for if and how it is utilized. - per_filter_config: Dict[ - str, "betterproto_lib_google_protobuf.Struct" - ] = betterproto.map_field(8, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # The per_filter_config field can be used to provide weighted cluster- - # specific configurations for filters. The key should match the filter name, - # such as *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this - # field is filter specific; see the :ref:`HTTP filter documentation - # ` for if and how it is utilized. - typed_per_filter_config: Dict[ - str, "betterproto_lib_google_protobuf.Any" - ] = betterproto.map_field(10, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - - def __post_init__(self) -> None: - super().__post_init__() - if self.per_filter_config: - warnings.warn( - "WeightedClusterClusterWeight.per_filter_config is deprecated", - DeprecationWarning, - ) - - -@dataclass(eq=False, repr=False) -class RouteMatch(betterproto.Message): - """[#next-free-field: 12]""" - - # If specified, the route is a prefix rule meaning that the prefix must match - # the beginning of the *:path* header. - prefix: str = betterproto.string_field(1, group="path_specifier") - # If specified, the route is an exact path rule meaning that the path must - # exactly match the *:path* header once the query string is removed. - path: str = betterproto.string_field(2, group="path_specifier") - # If specified, the route is a regular expression rule meaning that the regex - # must match the *:path* header once the query string is removed. The entire - # path (without the query string) must match the regex. The rule will not - # match if only a subsequence of the *:path* header matches the regex. The - # regex grammar is defined `here - # `_. Examples: * The - # regex ``/b[io]t`` matches the path */bit* * The regex ``/b[io]t`` matches - # the path */bot* * The regex ``/b[io]t`` does not match the path */bite* * - # The regex ``/b[io]t`` does not match the path */bit/bot* .. 
attention:: - # This field has been deprecated in favor of `safe_regex` as it is not safe - # for use with untrusted input in all cases. - regex: str = betterproto.string_field(3, group="path_specifier") - # If specified, the route is a regular expression rule meaning that the regex - # must match the *:path* header once the query string is removed. The entire - # path (without the query string) must match the regex. The rule will not - # match if only a subsequence of the *:path* header matches the regex. - # [#next-major-version: In the v3 API we should redo how path specification - # works such that we utilize StringMatcher, and additionally have consistent - # options around whether we strip query strings, do a case sensitive match, - # etc. In the interim it will be too disruptive to deprecate the existing - # options. We should even consider whether we want to do away with - # path_specifier entirely and just rely on a set of header matchers which can - # already match on :path, etc. The issue with that is it is unclear how to - # generically deal with query string stripping. This needs more thought.] - safe_regex: "___type_matcher__.RegexMatcher" = betterproto.message_field( - 10, group="path_specifier" - ) - # Indicates that prefix/path matching should be case sensitive. The default - # is true. - case_sensitive: Optional[bool] = betterproto.message_field( - 4, wraps=betterproto.TYPE_BOOL - ) - # Indicates that the route should additionally match on a runtime key. Every - # time the route is considered for a match, it must also fall under the - # percentage of matches indicated by this field. For some fraction N/D, a - # random number in the range [0,D) is selected. If the number is <= the value - # of the numerator N, or if the key is not present, the default value, the - # router continues to evaluate the remaining match criteria. A - # runtime_fraction route configuration can be used to roll out route changes - # in a gradual manner without full code/config deploys. Refer to the - # :ref:`traffic shifting - # ` docs for - # additional documentation. .. note:: Parsing this field is implemented - # such that the runtime key's data may be represented as a - # FractionalPercent proto represented as JSON/YAML and may also be - # represented as an integer with the assumption that the value is an - # integral percentage out of 100. For instance, a runtime key lookup - # returning the value "42" would parse as a FractionalPercent whose - # numerator is 42 and denominator is HUNDRED. This preserves legacy - # semantics. - runtime_fraction: "_core__.RuntimeFractionalPercent" = betterproto.message_field(9) - # Specifies a set of headers that the route should match on. The router will - # check the request’s headers against all the specified headers in the route - # config. A match will happen if all the headers in the route are present in - # the request with the same values (or based on presence if the value field - # is not in the config). - headers: List["HeaderMatcher"] = betterproto.message_field(6) - # Specifies a set of URL query parameters on which the route should match. - # The router will check the query string from the *path* header against all - # the specified query parameters. If the number of specified query parameters - # is nonzero, they all must match the *path* header's query string for a - # match to occur. - query_parameters: List["QueryParameterMatcher"] = betterproto.message_field(7) - # If specified, only gRPC requests will be matched. 
The router will check - # that the content-type header has a application/grpc or one of the various - # application/grpc+ values. - grpc: "RouteMatchGrpcRouteMatchOptions" = betterproto.message_field(8) - # If specified, the client tls context will be matched against the defined - # match options. [#next-major-version: unify with RBAC] - tls_context: "RouteMatchTlsContextMatchOptions" = betterproto.message_field(11) - - def __post_init__(self) -> None: - super().__post_init__() - if self.regex: - warnings.warn("RouteMatch.regex is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class RouteMatchGrpcRouteMatchOptions(betterproto.Message): - pass - - -@dataclass(eq=False, repr=False) -class RouteMatchTlsContextMatchOptions(betterproto.Message): - # If specified, the route will match against whether or not a certificate is - # presented. If not specified, certificate presentation status (true or - # false) will not be considered when route matching. - presented: Optional[bool] = betterproto.message_field( - 1, wraps=betterproto.TYPE_BOOL - ) - # If specified, the route will match against whether or not a certificate is - # validated. If not specified, certificate validation status (true or false) - # will not be considered when route matching. - validated: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) - - -@dataclass(eq=False, repr=False) -class CorsPolicy(betterproto.Message): - """[#next-free-field: 12]""" - - # Specifies the origins that will be allowed to do CORS requests. An origin - # is allowed if either allow_origin or allow_origin_regex match. .. - # attention:: This field has been deprecated in favor of - # `allow_origin_string_match`. - allow_origin: List[str] = betterproto.string_field(1) - # Specifies regex patterns that match allowed origins. An origin is allowed - # if either allow_origin or allow_origin_regex match. .. attention:: This - # field has been deprecated in favor of `allow_origin_string_match` as it is - # not safe for use with untrusted input in all cases. - allow_origin_regex: List[str] = betterproto.string_field(8) - # Specifies string patterns that match allowed origins. An origin is allowed - # if any of the string matchers match. - allow_origin_string_match: List[ - "___type_matcher__.StringMatcher" - ] = betterproto.message_field(11) - # Specifies the content for the *access-control-allow-methods* header. - allow_methods: str = betterproto.string_field(2) - # Specifies the content for the *access-control-allow-headers* header. - allow_headers: str = betterproto.string_field(3) - # Specifies the content for the *access-control-expose-headers* header. - expose_headers: str = betterproto.string_field(4) - # Specifies the content for the *access-control-max-age* header. - max_age: str = betterproto.string_field(5) - # Specifies whether the resource allows credentials. - allow_credentials: Optional[bool] = betterproto.message_field( - 6, wraps=betterproto.TYPE_BOOL - ) - # Specifies if the CORS filter is enabled. Defaults to true. Only effective - # on route. .. attention:: **This field is deprecated**. Set the - # :ref:`filter_enabled` - # field instead. - enabled: Optional[bool] = betterproto.message_field( - 7, wraps=betterproto.TYPE_BOOL, group="enabled_specifier" - ) - # Specifies the % of requests for which the CORS filter is enabled. If - # neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are - # specified, the CORS filter will be enabled for 100% of the requests. 
If - # :ref:`runtime_key - # ` is specified, - # Envoy will lookup the runtime key to get the percentage of requests to - # filter. - filter_enabled: "_core__.RuntimeFractionalPercent" = betterproto.message_field( - 9, group="enabled_specifier" - ) - # Specifies the % of requests for which the CORS policies will be evaluated - # and tracked, but not enforced. This field is intended to be used when - # ``filter_enabled`` and ``enabled`` are off. One of those fields have to - # explicitly disable the filter in order for this setting to take effect. If - # :ref:`runtime_key - # ` is specified, - # Envoy will lookup the runtime key to get the percentage of requests for - # which it will evaluate and track the request's *Origin* to determine if - # it's valid but will not enforce any policies. - shadow_enabled: "_core__.RuntimeFractionalPercent" = betterproto.message_field(10) - - def __post_init__(self) -> None: - super().__post_init__() - if self.allow_origin: - warnings.warn("CorsPolicy.allow_origin is deprecated", DeprecationWarning) - if self.allow_origin_regex: - warnings.warn( - "CorsPolicy.allow_origin_regex is deprecated", DeprecationWarning - ) - if self.enabled: - warnings.warn("CorsPolicy.enabled is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class RouteAction(betterproto.Message): - """[#next-free-field: 34]""" - - # Indicates the upstream cluster to which the request should be routed to. - cluster: str = betterproto.string_field(1, group="cluster_specifier") - # Envoy will determine the cluster to route to by reading the value of the - # HTTP header named by cluster_header from the request headers. If the header - # is not found or the referenced cluster does not exist, Envoy will return a - # 404 response. .. attention:: Internally, Envoy always uses the HTTP/2 - # *:authority* header to represent the HTTP/1 *Host* header. Thus, if - # attempting to match on *Host*, match on *:authority* instead. .. note:: - # If the header appears multiple times only the first value is used. - cluster_header: str = betterproto.string_field(2, group="cluster_specifier") - # Multiple upstream clusters can be specified for a given route. The request - # is routed to one of the upstream clusters based on weights assigned to each - # cluster. See :ref:`traffic splitting - # ` for additional - # documentation. - weighted_clusters: "WeightedCluster" = betterproto.message_field( - 3, group="cluster_specifier" - ) - # The HTTP status code to use when configured cluster is not found. The - # default response code is 503 Service Unavailable. - cluster_not_found_response_code: "RouteActionClusterNotFoundResponseCode" = ( - betterproto.enum_field(20) - ) - # Optional endpoint metadata match criteria used by the subset load balancer. - # Only endpoints in the upstream cluster with metadata matching what's set in - # this field will be considered for load balancing. If using - # :ref:`weighted_clusters - # `, metadata will be - # merged, with values provided there taking precedence. The filter name - # should be specified as *envoy.lb*. - metadata_match: "_core__.Metadata" = betterproto.message_field(4) - # Indicates that during forwarding, the matched prefix (or path) should be - # swapped with this value. This option allows application URLs to be rooted - # at a different path from those exposed at the reverse proxy layer. The - # router filter will place the original path before rewrite into the - # :ref:`x-envoy-original-path ` header. 
Only one of *prefix_rewrite* or :ref:`regex_rewrite - # ` may be specified. .. - # attention:: Pay careful attention to the use of trailing slashes in the - # :ref:`route's match ` prefix value. - # Stripping a prefix from a path requires multiple Routes to handle all - # cases. For example, rewriting */prefix* to */* and */prefix/etc* to - # */etc* cannot be done in a single :ref:`Route - # `, as shown by the below config entries: .. - # code-block:: yaml - match: prefix: "/prefix/" route: - # prefix_rewrite: "/" - match: prefix: "/prefix" route: - # prefix_rewrite: "/" Having above entries in the config, requests to - # */prefix* will be stripped to */*, while requests to */prefix/etc* will - # be stripped to */etc*. - prefix_rewrite: str = betterproto.string_field(5) - # Indicates that during forwarding, portions of the path that match the - # pattern should be rewritten, even allowing the substitution of capture - # groups from the pattern into the new path as specified by the rewrite - # substitution string. This is useful to allow application paths to be - # rewritten in a way that is aware of segments with variable content like - # identifiers. The router filter will place the original path as it was - # before the rewrite into the :ref:`x-envoy-original-path - # ` header. Only one of - # :ref:`prefix_rewrite ` or - # *regex_rewrite* may be specified. Examples using Google's `RE2 - # `_ engine: * The path pattern - # ``^/service/([^/]+)(/.*)$`` paired with a substitution string of - # ``\2/instance/\1`` would transform ``/service/foo/v1/api`` into - # ``/v1/api/instance/foo``. * The pattern ``one`` paired with a substitution - # string of ``two`` would transform ``/xxx/one/yyy/one/zzz`` into - # ``/xxx/two/yyy/two/zzz``. * The pattern ``^(.*?)one(.*)$`` paired with a - # substitution string of ``\1two\2`` would replace only the first - # occurrence of ``one``, transforming path ``/xxx/one/yyy/one/zzz`` into - # ``/xxx/two/yyy/one/zzz``. * The pattern ``(?i)/xxx/`` paired with a - # substitution string of ``/yyy/`` would do a case-insensitive match and - # transform path ``/aaa/XxX/bbb`` to ``/aaa/yyy/bbb``. - regex_rewrite: "___type_matcher__.RegexMatchAndSubstitute" = ( - betterproto.message_field(32) - ) - # Indicates that during forwarding, the host header will be swapped with this - # value. - host_rewrite: str = betterproto.string_field(6, group="host_rewrite_specifier") - # Indicates that during forwarding, the host header will be swapped with the - # hostname of the upstream host chosen by the cluster manager. This option is - # applicable only when the destination cluster for a route is of type - # *strict_dns* or *logical_dns*. Setting this to true with other cluster - # types has no effect. - auto_host_rewrite: Optional[bool] = betterproto.message_field( - 7, wraps=betterproto.TYPE_BOOL, group="host_rewrite_specifier" - ) - # Indicates that during forwarding, the host header will be swapped with the - # content of given downstream or :ref:`custom - # ` header. If header - # value is empty, host header is left intact. .. attention:: Pay attention - # to the potential security implications of using this option. Provided - # header must come from trusted source. .. note:: If the header appears - # multiple times only the first value is used. - auto_host_rewrite_header: str = betterproto.string_field( - 29, group="host_rewrite_specifier" - ) - # Specifies the upstream timeout for the route. If not specified, the default - # is 15s. 
This spans between the point at which the entire downstream request - # (i.e. end-of-stream) has been processed and when the upstream response has - # been completely processed. A value of 0 will disable the route's timeout. - # .. note:: This timeout includes all retries. See also - # :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - # :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, - # and the :ref:`retry overview `. - timeout: timedelta = betterproto.message_field(8) - # Specifies the idle timeout for the route. If not specified, there is no - # per-route idle timeout, although the connection manager wide - # :ref:`stream_idle_timeout ` will still - # apply. A value of 0 will completely disable the route's idle timeout, even - # if a connection manager stream idle timeout is configured. The idle timeout - # is distinct to :ref:`timeout `, - # which provides an upper bound on the upstream response time; - # :ref:`idle_timeout ` - # instead bounds the amount of time the request's stream may be idle. After - # header decoding, the idle timeout will apply on downstream and upstream - # request events. Each time an encode/decode event for headers or data is - # processed for the stream, the timer will be reset. If the timeout fires, - # the stream is terminated with a 408 Request Timeout error code if no - # upstream response header has been received, otherwise a stream reset - # occurs. - idle_timeout: timedelta = betterproto.message_field(24) - # Indicates that the route has a retry policy. Note that if this is set, - # it'll take precedence over the virtual host level retry policy entirely - # (e.g.: policies are not merged, most internal one becomes the enforced - # policy). - retry_policy: "RetryPolicy" = betterproto.message_field(9) - # [#not-implemented-hide:] Specifies the configuration for retry policy - # extension. Note that if this is set, it'll take precedence over the virtual - # host level retry policy entirely (e.g.: policies are not merged, most - # internal one becomes the enforced policy). :ref:`Retry policy - # ` should not be set if this - # field is used. - retry_policy_typed_config: "betterproto_lib_google_protobuf.Any" = ( - betterproto.message_field(33) - ) - # Indicates that the route has a request mirroring policy. .. attention:: - # This field has been deprecated in favor of `request_mirror_policies` which - # supports one or more mirroring policies. - request_mirror_policy: "RouteActionRequestMirrorPolicy" = betterproto.message_field( - 10 - ) - # Indicates that the route has request mirroring policies. - request_mirror_policies: List[ - "RouteActionRequestMirrorPolicy" - ] = betterproto.message_field(30) - # Optionally specifies the :ref:`routing priority - # `. - priority: "_core__.RoutingPriority" = betterproto.enum_field(11) - # Specifies a set of rate limit configurations that could be applied to the - # route. - rate_limits: List["RateLimit"] = betterproto.message_field(13) - # Specifies if the rate limit filter should include the virtual host rate - # limits. By default, if the route configured rate limits, the virtual host - # :ref:`rate_limits ` are not - # applied to the request. - include_vh_rate_limits: Optional[bool] = betterproto.message_field( - 14, wraps=betterproto.TYPE_BOOL - ) - # Specifies a list of hash policies to use for ring hash load balancing. Each - # hash policy is evaluated individually and the combined result is used to - # route the request. 
The method of combination is deterministic such that - # identical lists of hash policies will produce the same hash. Since a hash - # policy examines specific parts of a request, it can fail to produce a hash - # (i.e. if the hashed header is not present). If (and only if) all configured - # hash policies fail to generate a hash, no hash will be produced for the - # route. In this case, the behavior is the same as if no hash policies were - # specified (i.e. the ring hash load balancer will choose a random backend). - # If a hash policy has the "terminal" attribute set to true, and there is - # already a hash generated, the hash is returned immediately, ignoring the - # rest of the hash policy list. - hash_policy: List["RouteActionHashPolicy"] = betterproto.message_field(15) - # Indicates that the route has a CORS policy. - cors: "CorsPolicy" = betterproto.message_field(17) - # If present, and the request is a gRPC request, use the `grpc-timeout header - # `_, or its - # default value (infinity) instead of :ref:`timeout - # `, but limit the applied timeout - # to the maximum value specified here. If configured as 0, the maximum - # allowed timeout for gRPC requests is infinity. If not configured at all, - # the `grpc-timeout` header is not used and gRPC requests time out like any - # other requests using :ref:`timeout - # ` or its default. This can be - # used to prevent unexpected upstream request timeouts due to potentially - # long time gaps between gRPC request and response in gRPC streaming mode. .. - # note:: If a timeout is specified using - # :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes - # precedence over `grpc-timeout header - # `_, when - # both are present. See also :ref:`config_http_filters_router_x-envoy- - # upstream-rq-timeout-ms`, :ref:`config_http_filters_router_x-envoy- - # upstream-rq-per-try-timeout-ms`, and the :ref:`retry overview - # `. - max_grpc_timeout: timedelta = betterproto.message_field(23) - # If present, Envoy will adjust the timeout provided by the `grpc-timeout` - # header by subtracting the provided duration from the header. This is useful - # in allowing Envoy to set its global timeout to be less than that of the - # deadline imposed by the calling client, which makes it more likely that - # Envoy will handle the timeout instead of having the call canceled by the - # client. The offset will only be applied if the provided grpc_timeout is - # greater than the offset. This ensures that the offset will only ever - # decrease the timeout and never set it to 0 (meaning infinity). - grpc_timeout_offset: timedelta = betterproto.message_field(28) - upgrade_configs: List["RouteActionUpgradeConfig"] = betterproto.message_field(25) - internal_redirect_action: "RouteActionInternalRedirectAction" = ( - betterproto.enum_field(26) - ) - # An internal redirect is handled, iff the number of previous internal - # redirects that a downstream request has encountered is lower than this - # value, and :ref:`internal_redirect_action - # ` is set to - # :ref:`HANDLE_INTERNAL_REDIRECT ` In the case where a - # downstream request is bounced among multiple routes by internal redirect, - # the first route that hits this threshold, or has - # :ref:`internal_redirect_action - # ` set to - # :ref:`PASS_THROUGH_INTERNAL_REDIRECT ` will pass the - # redirect back to downstream. If not specified, at most one redirect will be - # followed. 
- max_internal_redirects: Optional[int] = betterproto.message_field( - 31, wraps=betterproto.TYPE_UINT32 - ) - # Indicates that the route has a hedge policy. Note that if this is set, - # it'll take precedence over the virtual host level hedge policy entirely - # (e.g.: policies are not merged, most internal one becomes the enforced - # policy). - hedge_policy: "HedgePolicy" = betterproto.message_field(27) - - def __post_init__(self) -> None: - super().__post_init__() - if self.request_mirror_policy: - warnings.warn( - "RouteAction.request_mirror_policy is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class RouteActionRequestMirrorPolicy(betterproto.Message): - """ - The router is capable of shadowing traffic from one cluster to another. The - current implementation is "fire and forget," meaning Envoy will not wait - for the shadow cluster to respond before returning the response from the - primary cluster. All normal statistics are collected for the shadow cluster - making this feature useful for testing. During shadowing, the - host/authority header is altered such that *-shadow* is appended. This is - useful for logging. For example, *cluster1* becomes *cluster1-shadow*. .. - note:: Shadowing will not be triggered if the primary cluster does not - exist. - """ - - # Specifies the cluster that requests will be mirrored to. The cluster must - # exist in the cluster manager configuration. - cluster: str = betterproto.string_field(1) - # If not specified, all requests to the target cluster will be mirrored. If - # specified, Envoy will lookup the runtime key to get the % of requests to - # mirror. Valid values are from 0 to 10000, allowing for increments of 0.01% - # of requests to be mirrored. If the runtime key is specified in the - # configuration but not present in runtime, 0 is the default and thus 0% of - # requests will be mirrored. .. attention:: **This field is deprecated**. - # Set the :ref:`runtime_fraction - # ` - # field instead. Mirroring occurs if both this and - # ` - # are not set. - runtime_key: str = betterproto.string_field(2) - # If not specified, all requests to the target cluster will be mirrored. If - # specified, this field takes precedence over the `runtime_key` field and - # requests must also fall under the percentage of matches indicated by this - # field. For some fraction N/D, a random number in the range [0,D) is - # selected. If the number is <= the value of the numerator N, or if the key - # is not present, the default value, the request will be mirrored. - runtime_fraction: "_core__.RuntimeFractionalPercent" = betterproto.message_field(3) - # Determines if the trace span should be sampled. Defaults to true. - trace_sampled: Optional[bool] = betterproto.message_field( - 4, wraps=betterproto.TYPE_BOOL - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.runtime_key: - warnings.warn( - "RouteActionRequestMirrorPolicy.runtime_key is deprecated", - DeprecationWarning, - ) - - -@dataclass(eq=False, repr=False) -class RouteActionHashPolicy(betterproto.Message): - """ - Specifies the route's hashing policy if the upstream cluster uses a hashing - :ref:`load balancer `. [#next-free- - field: 7] - """ - - # Header hash policy. - header: "RouteActionHashPolicyHeader" = betterproto.message_field( - 1, group="policy_specifier" - ) - # Cookie hash policy. - cookie: "RouteActionHashPolicyCookie" = betterproto.message_field( - 2, group="policy_specifier" - ) - # Connection properties hash policy. 
- connection_properties: "RouteActionHashPolicyConnectionProperties" = ( - betterproto.message_field(3, group="policy_specifier") - ) - # Query parameter hash policy. - query_parameter: "RouteActionHashPolicyQueryParameter" = betterproto.message_field( - 5, group="policy_specifier" - ) - # Filter state hash policy. - filter_state: "RouteActionHashPolicyFilterState" = betterproto.message_field( - 6, group="policy_specifier" - ) - # The flag that short-circuits the hash computing. This field provides a - # 'fallback' style of configuration: "if a terminal policy doesn't work, - # fallback to rest of the policy list", it saves time when the terminal - # policy works. If true, and there is already a hash computed, ignore rest of - # the list of hash polices. For example, if the following hash methods are - # configured: ========= ======== specifier terminal ========= ======== - # Header A true Header B false Header C false ========= ======== The - # generateHash process ends if policy "header A" generates a hash, as it's a - # terminal policy. - terminal: bool = betterproto.bool_field(4) - - -@dataclass(eq=False, repr=False) -class RouteActionHashPolicyHeader(betterproto.Message): - # The name of the request header that will be used to obtain the hash key. If - # the request header is not present, no hash will be produced. - header_name: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class RouteActionHashPolicyCookie(betterproto.Message): - """ - Envoy supports two types of cookie affinity: 1. Passive. Envoy takes a - cookie that's present in the cookies header and hashes on its value. 2. - Generated. Envoy generates and sets a cookie with an expiration (TTL) on - the first request from the client in its response to the client, based - on the endpoint the request gets sent to. The client then presents this - on the next and all subsequent requests. The hash of this is sufficient - to ensure these requests get sent to the same endpoint. The cookie is - generated by hashing the source and destination ports and addresses so - that multiple independent HTTP2 streams on the same connection will - independently receive the same cookie, even if they arrive at the Envoy - simultaneously. - """ - - # The name of the cookie that will be used to obtain the hash key. If the - # cookie is not present and ttl below is not set, no hash will be produced. - name: str = betterproto.string_field(1) - # If specified, a cookie with the TTL will be generated if the cookie is not - # present. If the TTL is present and zero, the generated cookie will be a - # session cookie. - ttl: timedelta = betterproto.message_field(2) - # The name of the path for the cookie. If no path is specified here, no path - # will be set for the cookie. - path: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class RouteActionHashPolicyConnectionProperties(betterproto.Message): - # Hash on source IP address. - source_ip: bool = betterproto.bool_field(1) - - -@dataclass(eq=False, repr=False) -class RouteActionHashPolicyQueryParameter(betterproto.Message): - # The name of the URL query parameter that will be used to obtain the hash - # key. If the parameter is not present, no hash will be produced. Query - # parameter names are case-sensitive. - name: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class RouteActionHashPolicyFilterState(betterproto.Message): - # The name of the Object in the per-request filterState, which is an - # Envoy::Hashable object. 
If there is no data associated with the key, or the - # stored object is not Envoy::Hashable, no hash will be produced. - key: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class RouteActionUpgradeConfig(betterproto.Message): - """ - Allows enabling and disabling upgrades on a per-route basis. This overrides - any enabled/disabled upgrade filter chain specified in the - HttpConnectionManager :ref:`upgrade_configs ` - but does not affect any custom filter chain specified there. - """ - - # The case-insensitive name of this upgrade, e.g. "websocket". For each - # upgrade type present in upgrade_configs, requests with Upgrade: - # [upgrade_type] will be proxied upstream. - upgrade_type: str = betterproto.string_field(1) - # Determines if upgrades are available on this route. Defaults to true. - enabled: Optional[bool] = betterproto.message_field(2, wraps=betterproto.TYPE_BOOL) - - -@dataclass(eq=False, repr=False) -class RetryPolicy(betterproto.Message): - """ - HTTP retry :ref:`architecture overview `. - [#next-free-field: 11] - """ - - # Specifies the conditions under which retry takes place. These are the same - # conditions documented for :ref:`config_http_filters_router_x-envoy-retry- - # on` and :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. - retry_on: str = betterproto.string_field(1) - # Specifies the allowed number of retries. This parameter is optional and - # defaults to 1. These are the same conditions documented for - # :ref:`config_http_filters_router_x-envoy-max-retries`. - num_retries: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # Specifies a non-zero upstream timeout per retry attempt. This parameter is - # optional. The same conditions documented for - # :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` - # apply. .. note:: If left unspecified, Envoy will use the global - # :ref:`route timeout ` for the - # request. Consequently, when using a :ref:`5xx - # ` based retry policy, a - # request that times out will not be retried as the total timeout budget - # would have been exhausted. - per_try_timeout: timedelta = betterproto.message_field(3) - # Specifies an implementation of a RetryPriority which is used to determine - # the distribution of load across priorities used for retries. Refer to - # :ref:`retry plugin configuration ` for - # more details. - retry_priority: "RetryPolicyRetryPriority" = betterproto.message_field(4) - # Specifies a collection of RetryHostPredicates that will be consulted when - # selecting a host for retries. If any of the predicates reject the host, - # host selection will be reattempted. Refer to :ref:`retry plugin - # configuration ` for more details. - retry_host_predicate: List[ - "RetryPolicyRetryHostPredicate" - ] = betterproto.message_field(5) - # The maximum number of times host selection will be reattempted before - # giving up, at which point the host that was last selected will be routed - # to. If unspecified, this will default to retrying once. - host_selection_retry_max_attempts: int = betterproto.int64_field(6) - # HTTP status codes that should trigger a retry in addition to those - # specified by retry_on. - retriable_status_codes: List[int] = betterproto.uint32_field(7) - # Specifies parameters that control retry back off. This parameter is - # optional, in which case the default base interval is 25 milliseconds or, if - # set, the current value of the `upstream.base_retry_backoff_ms` runtime - # parameter. 
The default maximum interval is 10 times the base interval. The - # documentation for :ref:`config_http_filters_router_x-envoy-max-retries` - # describes Envoy's back-off algorithm. - retry_back_off: "RetryPolicyRetryBackOff" = betterproto.message_field(8) - # HTTP response headers that trigger a retry if present in the response. A - # retry will be triggered if any of the header matches match the upstream - # response headers. The field is only consulted if 'retriable-headers' retry - # policy is active. - retriable_headers: List["HeaderMatcher"] = betterproto.message_field(9) - # HTTP headers which must be present in the request for retries to be - # attempted. - retriable_request_headers: List["HeaderMatcher"] = betterproto.message_field(10) - - -@dataclass(eq=False, repr=False) -class RetryPolicyRetryPriority(betterproto.Message): - name: str = betterproto.string_field(1) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn( - "RetryPolicyRetryPriority.config is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class RetryPolicyRetryHostPredicate(betterproto.Message): - name: str = betterproto.string_field(1) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn( - "RetryPolicyRetryHostPredicate.config is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class RetryPolicyRetryBackOff(betterproto.Message): - # Specifies the base interval between retries. This parameter is required and - # must be greater than zero. Values less than 1 ms are rounded up to 1 ms. - # See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion - # of Envoy's back-off algorithm. - base_interval: timedelta = betterproto.message_field(1) - # Specifies the maximum interval between retries. This parameter is optional, - # but must be greater than or equal to the `base_interval` if set. The - # default is 10 times the `base_interval`. See - # :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of - # Envoy's back-off algorithm. - max_interval: timedelta = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class HedgePolicy(betterproto.Message): - """ - HTTP request hedging :ref:`architecture overview - `. - """ - - # Specifies the number of initial requests that should be sent upstream. Must - # be at least 1. Defaults to 1. [#not-implemented-hide:] - initial_requests: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # Specifies a probability that an additional upstream request should be sent - # on top of what is specified by initial_requests. Defaults to 0. [#not- - # implemented-hide:] - additional_request_chance: "___type__.FractionalPercent" = ( - betterproto.message_field(2) - ) - # Indicates that a hedged request should be sent when the per-try timeout is - # hit. This means that a retry will be issued without resetting the original - # request, leaving multiple upstream requests in flight. 
The first request to - # complete successfully will be the one returned to the caller. * At any - # time, a successful response (i.e. not triggering any of the retry-on - # conditions) would be returned to the client. * Before per-try timeout, an - # error response (per retry-on conditions) would be retried immediately or - # returned ot the client if there are no more retries left. * After per-try - # timeout, an error response would be discarded, as a retry in the form of a - # hedged request is already in progress. Note: For this to have effect, you - # must have a :ref:`RetryPolicy ` that - # retries at least one error code and specifies a maximum number of retries. - # Defaults to false. - hedge_on_per_try_timeout: bool = betterproto.bool_field(3) - - -@dataclass(eq=False, repr=False) -class RedirectAction(betterproto.Message): - """[#next-free-field: 9]""" - - # The scheme portion of the URL will be swapped with "https". - https_redirect: bool = betterproto.bool_field(4, group="scheme_rewrite_specifier") - # The scheme portion of the URL will be swapped with this value. - scheme_redirect: str = betterproto.string_field(7, group="scheme_rewrite_specifier") - # The host portion of the URL will be swapped with this value. - host_redirect: str = betterproto.string_field(1) - # The port value of the URL will be swapped with this value. - port_redirect: int = betterproto.uint32_field(8) - # The path portion of the URL will be swapped with this value. Please note - # that query string in path_redirect will override the request's query string - # and will not be stripped. For example, let's say we have the following - # routes: - match: { path: "/old-path-1" } redirect: { path_redirect: - # "/new-path-1" } - match: { path: "/old-path-2" } redirect: { - # path_redirect: "/new-path-2", strip-query: "true" } - match: { path: "/old- - # path-3" } redirect: { path_redirect: "/new-path-3?foo=1", strip_query: - # "true" } 1. if request uri is "/old-path-1?bar=1", users will be redirected - # to "/new-path-1?bar=1" 2. if request uri is "/old-path-2?bar=1", users will - # be redirected to "/new-path-2" 3. if request uri is "/old-path-3?bar=1", - # users will be redirected to "/new-path-3?foo=1" - path_redirect: str = betterproto.string_field(2, group="path_rewrite_specifier") - # Indicates that during redirection, the matched prefix (or path) should be - # swapped with this value. This option allows redirect URLs be dynamically - # created based on the request. .. attention:: Pay attention to the use of - # trailing slashes as mentioned in :ref:`RouteAction's prefix_rewrite - # `. - prefix_rewrite: str = betterproto.string_field(5, group="path_rewrite_specifier") - # The HTTP status code to use in the redirect response. The default response - # code is MOVED_PERMANENTLY (301). - response_code: "RedirectActionRedirectResponseCode" = betterproto.enum_field(3) - # Indicates that during redirection, the query portion of the URL will be - # removed. Default value is false. - strip_query: bool = betterproto.bool_field(6) - - -@dataclass(eq=False, repr=False) -class DirectResponseAction(betterproto.Message): - # Specifies the HTTP response status to be returned. - status: int = betterproto.uint32_field(1) - # Specifies the content of the response body. If this setting is omitted, no - # body is included in the generated response. .. 
note:: Headers can be - # specified using *response_headers_to_add* in the enclosing - # :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_RouteConfiguration` - # or :ref:`envoy_api_msg_route.VirtualHost`. - body: "_core__.DataSource" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Decorator(betterproto.Message): - # The operation name associated with the request matched to this route. If - # tracing is enabled, this information will be used as the span name reported - # for this request. .. note:: For ingress (inbound) requests, or egress - # (outbound) responses, this value may be overridden by the :ref:`x-envoy- - # decorator-operation ` header. - operation: str = betterproto.string_field(1) - # Whether the decorated details should be propagated to the other party. The - # default is true. - propagate: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) - - -@dataclass(eq=False, repr=False) -class Tracing(betterproto.Message): - # Target percentage of requests managed by this HTTP connection manager that - # will be force traced if the :ref:`x-client-trace-id - # ` header is set. This field - # is a direct analog for the runtime variable 'tracing.client_sampling' in - # the :ref:`HTTP Connection Manager `. Default: - # 100% - client_sampling: "___type__.FractionalPercent" = betterproto.message_field(1) - # Target percentage of requests managed by this HTTP connection manager that - # will be randomly selected for trace generation, if not requested by the - # client or not forced. This field is a direct analog for the runtime - # variable 'tracing.random_sampling' in the :ref:`HTTP Connection Manager - # `. Default: 100% - random_sampling: "___type__.FractionalPercent" = betterproto.message_field(2) - # Target percentage of requests managed by this HTTP connection manager that - # will be traced after all other sampling checks have been applied (client- - # directed, force tracing, random sampling). This field functions as an upper - # limit on the total configured sampling rate. For instance, setting - # client_sampling to 100% but overall_sampling to 1% will result in only 1% - # of client requests with the appropriate headers to be force traced. This - # field is a direct analog for the runtime variable 'tracing.global_enabled' - # in the :ref:`HTTP Connection Manager `. - # Default: 100% - overall_sampling: "___type__.FractionalPercent" = betterproto.message_field(3) - # A list of custom tags with unique tag name to create tags for the active - # span. It will take effect after merging with the :ref:`corresponding - # configuration ` configured in the HTTP - # connection manager. If two tags with the same name are configured each in - # the HTTP connection manager and the route level, the one configured here - # takes priority. - custom_tags: List["___type_tracing_v2__.CustomTag"] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class VirtualCluster(betterproto.Message): - """ - A virtual cluster is a way of specifying a regex matching rule against - certain important endpoints such that statistics are generated explicitly - for the matched requests. The reason this is useful is that when doing - prefix/path matching Envoy does not always know what the application - considers to be an endpoint. Thus, it’s impossible for Envoy to generically - emit per endpoint statistics. However, often systems have highly critical - endpoints that they wish to get “perfect” statistics on. 
Virtual cluster - statistics are perfect in the sense that they are emitted on the downstream - side such that they include network level failures. Documentation for - :ref:`virtual cluster statistics - `. .. note:: Virtual clusters - are a useful tool, but we do not recommend setting up a virtual cluster for - every application endpoint. This is both not easily maintainable and as - well the matching and statistics output are not free. - """ - - # Specifies a regex pattern to use for matching requests. The entire path of - # the request must match the regex. The regex grammar used is defined `here - # `_. Examples: * The - # regex ``/rides/\d+`` matches the path */rides/0* * The regex ``/rides/\d+`` - # matches the path */rides/123* * The regex ``/rides/\d+`` does not match the - # path */rides/123/456* .. attention:: This field has been deprecated in - # favor of `headers` as it is not safe for use with untrusted input in all - # cases. - pattern: str = betterproto.string_field(1) - # Specifies a list of header matchers to use for matching requests. Each - # specified header must match. The pseudo-headers `:path` and `:method` can - # be used to match the request path and method, respectively. - headers: List["HeaderMatcher"] = betterproto.message_field(4) - # Specifies the name of the virtual cluster. The virtual cluster name as well - # as the virtual host name are used when emitting statistics. The statistics - # are emitted by the router filter and are documented :ref:`here - # `. - name: str = betterproto.string_field(2) - # Optionally specifies the HTTP method to match on. For example GET, PUT, - # etc. .. attention:: This field has been deprecated in favor of `headers`. - method: "_core__.RequestMethod" = betterproto.enum_field(3) - - def __post_init__(self) -> None: - super().__post_init__() - if self.pattern: - warnings.warn("VirtualCluster.pattern is deprecated", DeprecationWarning) - if self.method: - warnings.warn("VirtualCluster.method is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class RateLimit(betterproto.Message): - """ - Global rate limiting :ref:`architecture overview - `. - """ - - # Refers to the stage set in the filter. The rate limit configuration only - # applies to filters with the same stage number. The default stage number is - # 0. .. note:: The filter supports a range of 0 - 10 inclusively for stage - # numbers. - stage: Optional[int] = betterproto.message_field(1, wraps=betterproto.TYPE_UINT32) - # The key to be set in runtime to disable this rate limit configuration. - disable_key: str = betterproto.string_field(2) - # A list of actions that are to be applied for this rate limit configuration. - # Order matters as the actions are processed sequentially and the descriptor - # is composed by appending descriptor entries in that sequence. If an action - # cannot append a descriptor entry, no descriptor is generated for the - # configuration. See :ref:`composing actions - # ` for additional - # documentation. - actions: List["RateLimitAction"] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class RateLimitAction(betterproto.Message): - """[#next-free-field: 7]""" - - # Rate limit on source cluster. - source_cluster: "RateLimitActionSourceCluster" = betterproto.message_field( - 1, group="action_specifier" - ) - # Rate limit on destination cluster. - destination_cluster: "RateLimitActionDestinationCluster" = ( - betterproto.message_field(2, group="action_specifier") - ) - # Rate limit on request headers. 
- request_headers: "RateLimitActionRequestHeaders" = betterproto.message_field( - 3, group="action_specifier" - ) - # Rate limit on remote address. - remote_address: "RateLimitActionRemoteAddress" = betterproto.message_field( - 4, group="action_specifier" - ) - # Rate limit on a generic key. - generic_key: "RateLimitActionGenericKey" = betterproto.message_field( - 5, group="action_specifier" - ) - # Rate limit on the existence of request headers. - header_value_match: "RateLimitActionHeaderValueMatch" = betterproto.message_field( - 6, group="action_specifier" - ) - - -@dataclass(eq=False, repr=False) -class RateLimitActionSourceCluster(betterproto.Message): - """ - The following descriptor entry is appended to the descriptor: .. code- - block:: cpp ("source_cluster", "") is derived from the :option:`--service-cluster` option. - """ - - pass - - -@dataclass(eq=False, repr=False) -class RateLimitActionDestinationCluster(betterproto.Message): - """ - The following descriptor entry is appended to the descriptor: .. code- - block:: cpp ("destination_cluster", "") Once a - request matches against a route table rule, a routed cluster is determined - by one of the following :ref:`route table configuration - ` settings: * :ref:`cluster - ` indicates the upstream cluster - to route to. * :ref:`weighted_clusters - ` chooses a cluster - randomly from a set of clusters with attributed weight. * - :ref:`cluster_header ` - indicates which header in the request contains the target cluster. - """ - - pass - - -@dataclass(eq=False, repr=False) -class RateLimitActionRequestHeaders(betterproto.Message): - """ - The following descriptor entry is appended when a header contains a key - that matches the *header_name*: .. code-block:: cpp ("", - "") - """ - - # The header name to be queried from the request headers. The header’s value - # is used to populate the value of the descriptor entry for the - # descriptor_key. - header_name: str = betterproto.string_field(1) - # The key to use in the descriptor entry. - descriptor_key: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class RateLimitActionRemoteAddress(betterproto.Message): - """ - The following descriptor entry is appended to the descriptor and is - populated using the trusted address from :ref:`x-forwarded-for - `: .. code-block:: cpp - ("remote_address", "") - """ - - pass - - -@dataclass(eq=False, repr=False) -class RateLimitActionGenericKey(betterproto.Message): - """ - The following descriptor entry is appended to the descriptor: .. code- - block:: cpp ("generic_key", "") - """ - - # The value to use in the descriptor entry. - descriptor_value: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class RateLimitActionHeaderValueMatch(betterproto.Message): - """ - The following descriptor entry is appended to the descriptor: .. code- - block:: cpp ("header_match", "") - """ - - # The value to use in the descriptor entry. - descriptor_value: str = betterproto.string_field(1) - # If set to true, the action will append a descriptor entry when the request - # matches the headers. If set to false, the action will append a descriptor - # entry when the request does not match the headers. The default value is - # true. - expect_match: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) - # Specifies a set of headers that the rate limit action should match on. The - # action will check the request’s headers against all the specified headers - # in the config. 
A match will happen if all the headers in the config are - # present in the request with the same values (or based on presence if the - # value field is not in the config). - headers: List["HeaderMatcher"] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class HeaderMatcher(betterproto.Message): - """ - .. attention:: Internally, Envoy always uses the HTTP/2 *:authority* - header to represent the HTTP/1 *Host* header. Thus, if attempting to - match on *Host*, match on *:authority* instead. .. attention:: To route - on HTTP method, use the special HTTP/2 *:method* header. This works for - both HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., .. code- - block:: json { "name": ":method", "exact_match": "POST" - } .. attention:: In the absence of any header match specifier, match will - default to :ref:`present_match - `. i.e, a request that - has the :ref:`name ` header - will match, regardless of the header's value. [#next-major-version: - HeaderMatcher should be refactored to use StringMatcher.] [#next-free- - field: 12] - """ - - # Specifies the name of the header in the request. - name: str = betterproto.string_field(1) - # If specified, header match will be performed based on the value of the - # header. - exact_match: str = betterproto.string_field(4, group="header_match_specifier") - # If specified, this regex string is a regular expression rule which implies - # the entire request header value must match the regex. The rule will not - # match if only a subsequence of the request header value matches the regex. - # The regex grammar used in the value field is defined `here - # `_. Examples: * The - # regex ``\d{3}`` matches the value *123* * The regex ``\d{3}`` does not - # match the value *1234* * The regex ``\d{3}`` does not match the value - # *123.456* .. attention:: This field has been deprecated in favor of - # `safe_regex_match` as it is not safe for use with untrusted input in all - # cases. - regex_match: str = betterproto.string_field(5, group="header_match_specifier") - # If specified, this regex string is a regular expression rule which implies - # the entire request header value must match the regex. The rule will not - # match if only a subsequence of the request header value matches the regex. - safe_regex_match: "___type_matcher__.RegexMatcher" = betterproto.message_field( - 11, group="header_match_specifier" - ) - # If specified, header match will be performed based on range. The rule will - # match if the request header value is within this range. The entire request - # header value must represent an integer in base 10 notation: consisting of - # an optional plus or minus sign followed by a sequence of digits. The rule - # will not match if the header value does not represent an integer. Match - # will fail for empty values, floating point numbers or if only a subsequence - # of the header value is an integer. Examples: * For range [-10,0), route - # will match for header value -1, but not for 0, "somestring", 10.9, - # "-1somestring" - range_match: "___type__.Int64Range" = betterproto.message_field( - 6, group="header_match_specifier" - ) - # If specified, header match will be performed based on whether the header is - # in the request. - present_match: bool = betterproto.bool_field(7, group="header_match_specifier") - # If specified, header match will be performed based on the prefix of the - # header value. Note: empty prefix is not allowed, please use present_match - # instead. 
Examples: * The prefix *abcd* matches the value *abcdxyz*, but not - # for *abcxyz*. - prefix_match: str = betterproto.string_field(9, group="header_match_specifier") - # If specified, header match will be performed based on the suffix of the - # header value. Note: empty suffix is not allowed, please use present_match - # instead. Examples: * The suffix *abcd* matches the value *xyzabcd*, but not - # for *xyzbcd*. - suffix_match: str = betterproto.string_field(10, group="header_match_specifier") - # If specified, the match result will be inverted before checking. Defaults - # to false. Examples: * The regex ``\d{3}`` does not match the value *1234*, - # so it will match when inverted. * The range [-10,0) will match the value - # -1, so it will not match when inverted. - invert_match: bool = betterproto.bool_field(8) - - def __post_init__(self) -> None: - super().__post_init__() - if self.regex_match: - warnings.warn("HeaderMatcher.regex_match is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class QueryParameterMatcher(betterproto.Message): - """ - Query parameter matching treats the query string of a request's :path - header as an ampersand-separated list of keys and/or key=value elements. - [#next-free-field: 7] - """ - - # Specifies the name of a key that must be present in the requested *path*'s - # query string. - name: str = betterproto.string_field(1) - # Specifies the value of the key. If the value is absent, a request that - # contains the key in its query string will match, whether the key appears - # with a value (e.g., "?debug=true") or not (e.g., "?debug") ..attention:: - # This field is deprecated. Use an `exact` match inside the `string_match` - # field. - value: str = betterproto.string_field(3) - # Specifies whether the query parameter value is a regular expression. - # Defaults to false. The entire query parameter value (i.e., the part to the - # right of the equals sign in "key=value") must match the regex. E.g., the - # regex ``\d+$`` will match *123* but not *a123* or *123a*. ..attention:: - # This field is deprecated. Use a `safe_regex` match inside the - # `string_match` field. - regex: Optional[bool] = betterproto.message_field(4, wraps=betterproto.TYPE_BOOL) - # Specifies whether a query parameter value should match against a string. - string_match: "___type_matcher__.StringMatcher" = betterproto.message_field( - 5, group="query_parameter_match_specifier" - ) - # Specifies whether a query parameter should be present. - present_match: bool = betterproto.bool_field( - 6, group="query_parameter_match_specifier" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.value: - warnings.warn( - "QueryParameterMatcher.value is deprecated", DeprecationWarning - ) - if self.regex: - warnings.warn( - "QueryParameterMatcher.regex is deprecated", DeprecationWarning - ) - - -from .. import core as _core__ -from .... 
import type as ___type__ -from ....type import matcher as ___type_matcher__ -from ....type.tracing import v2 as ___type_tracing_v2__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/__init__.py b/src/envoy_data_plane/envoy/config/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/accesslog/__init__.py b/src/envoy_data_plane/envoy/config/accesslog/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/accesslog/v2/__init__.py b/src/envoy_data_plane/envoy/config/accesslog/v2/__init__.py deleted file mode 100644 index 9cbfed6..0000000 --- a/src/envoy_data_plane/envoy/config/accesslog/v2/__init__.py +++ /dev/null @@ -1,109 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/accesslog/v2/als.proto, envoy/config/accesslog/v2/file.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FileAccessLog(betterproto.Message): - """ - Custom configuration for an :ref:`AccessLog - ` that writes log - entries directly to a file. Configures the built-in - *envoy.access_loggers.file* AccessLog. - """ - - # A path to a local file to which to write the access log entries. - path: str = betterproto.string_field(1) - # Access log :ref:`format string`. Envoy - # supports :ref:`custom access log formats ` as - # well as a :ref:`default format `. - format: str = betterproto.string_field(2, group="access_log_format") - # Access log :ref:`format dictionary`. - # All values are rendered as strings. - json_format: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 3, group="access_log_format" - ) - # Access log :ref:`format dictionary`. - # Values are rendered as strings, numbers, or boolean values as appropriate. - # Nested JSON objects may be produced by some command operators - # (e.g.FILTER_STATE or DYNAMIC_METADATA). See the documentation for a - # specific command operator for details. - typed_json_format: "betterproto_lib_google_protobuf.Struct" = ( - betterproto.message_field(4, group="access_log_format") - ) - - -@dataclass(eq=False, repr=False) -class HttpGrpcAccessLogConfig(betterproto.Message): - """ - Configuration for the built-in *envoy.access_loggers.http_grpc* - :ref:`AccessLog `. This - configuration will populate :ref:`StreamAccessLogsMessage.http_logs - `. - [#extension: envoy.access_loggers.http_grpc] - """ - - common_config: "CommonGrpcAccessLogConfig" = betterproto.message_field(1) - # Additional request headers to log in - # :ref:`HTTPRequestProperties.request_headers - # `. - additional_request_headers_to_log: List[str] = betterproto.string_field(2) - # Additional response headers to log in - # :ref:`HTTPResponseProperties.response_headers `. - additional_response_headers_to_log: List[str] = betterproto.string_field(3) - # Additional response trailers to log in - # :ref:`HTTPResponseProperties.response_trailers `. - additional_response_trailers_to_log: List[str] = betterproto.string_field(4) - - -@dataclass(eq=False, repr=False) -class TcpGrpcAccessLogConfig(betterproto.Message): - """ - Configuration for the built-in *envoy.access_loggers.tcp_grpc* type. This - configuration will populate *StreamAccessLogsMessage.tcp_logs*. 
- [#extension: envoy.access_loggers.tcp_grpc] - """ - - common_config: "CommonGrpcAccessLogConfig" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class CommonGrpcAccessLogConfig(betterproto.Message): - """Common configuration for gRPC access logs. [#next-free-field: 6]""" - - # The friendly name of the access log to be returned in - # :ref:`StreamAccessLogsMessage.Identifier - # `. - # This allows the access log server to differentiate between different access - # logs coming from the same Envoy. - log_name: str = betterproto.string_field(1) - # The gRPC service for the access log service. - grpc_service: "___api_v2_core__.GrpcService" = betterproto.message_field(2) - # Interval for flushing access logs to the gRPC stream. Logger will flush - # requests every time this interval is elapsed, or when batch size limit is - # hit, whichever comes first. Defaults to 1 second. - buffer_flush_interval: timedelta = betterproto.message_field(3) - # Soft size limit in bytes for access log entries buffer. Logger will buffer - # requests until this limit is hit, or every time flush interval is elapsed, - # whichever comes first. Setting it to zero effectively disables the - # batching. Defaults to 16384. - buffer_size_bytes: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # Additional filter state objects to log in :ref:`filter_state_objects - # `. - # Logger will call `FilterState::Object::serializeAsProto` to serialize the - # filter state object. - filter_state_objects_to_log: List[str] = betterproto.string_field(5) - - -from ....api.v2 import core as ___api_v2_core__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/accesslog/v3/__init__.py b/src/envoy_data_plane/envoy/config/accesslog/v3/__init__.py deleted file mode 100644 index c95421c..0000000 --- a/src/envoy_data_plane/envoy/config/accesslog/v3/__init__.py +++ /dev/null @@ -1,273 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/accesslog/v3/accesslog.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class ComparisonFilterOp(betterproto.Enum): - EQ = 0 - GE = 1 - LE = 2 - - -class GrpcStatusFilterStatus(betterproto.Enum): - OK = 0 - CANCELED = 1 - UNKNOWN = 2 - INVALID_ARGUMENT = 3 - DEADLINE_EXCEEDED = 4 - NOT_FOUND = 5 - ALREADY_EXISTS = 6 - PERMISSION_DENIED = 7 - RESOURCE_EXHAUSTED = 8 - FAILED_PRECONDITION = 9 - ABORTED = 10 - OUT_OF_RANGE = 11 - UNIMPLEMENTED = 12 - INTERNAL = 13 - UNAVAILABLE = 14 - DATA_LOSS = 15 - UNAUTHENTICATED = 16 - - -@dataclass(eq=False, repr=False) -class AccessLog(betterproto.Message): - # The name of the access log extension to instantiate. The name must match - # one of the compiled in loggers. See the :ref:`extensions listed in - # typed_config below ` for the - # default list of available loggers. - name: str = betterproto.string_field(1) - # Filter which is used to determine if the access log needs to be written. - filter: "AccessLogFilter" = betterproto.message_field(2) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 4, group="config_type" - ) - - -@dataclass(eq=False, repr=False) -class AccessLogFilter(betterproto.Message): - """[#next-free-field: 13]""" - - # Status code filter.
- status_code_filter: "StatusCodeFilter" = betterproto.message_field( - 1, group="filter_specifier" - ) - # Duration filter. - duration_filter: "DurationFilter" = betterproto.message_field( - 2, group="filter_specifier" - ) - # Not health check filter. - not_health_check_filter: "NotHealthCheckFilter" = betterproto.message_field( - 3, group="filter_specifier" - ) - # Traceable filter. - traceable_filter: "TraceableFilter" = betterproto.message_field( - 4, group="filter_specifier" - ) - # Runtime filter. - runtime_filter: "RuntimeFilter" = betterproto.message_field( - 5, group="filter_specifier" - ) - # And filter. - and_filter: "AndFilter" = betterproto.message_field(6, group="filter_specifier") - # Or filter. - or_filter: "OrFilter" = betterproto.message_field(7, group="filter_specifier") - # Header filter. - header_filter: "HeaderFilter" = betterproto.message_field( - 8, group="filter_specifier" - ) - # Response flag filter. - response_flag_filter: "ResponseFlagFilter" = betterproto.message_field( - 9, group="filter_specifier" - ) - # gRPC status filter. - grpc_status_filter: "GrpcStatusFilter" = betterproto.message_field( - 10, group="filter_specifier" - ) - # Extension filter. [#extension-category: - # envoy.access_loggers.extension_filters] - extension_filter: "ExtensionFilter" = betterproto.message_field( - 11, group="filter_specifier" - ) - # Metadata Filter - metadata_filter: "MetadataFilter" = betterproto.message_field( - 12, group="filter_specifier" - ) - - -@dataclass(eq=False, repr=False) -class ComparisonFilter(betterproto.Message): - """Filter on an integer comparison.""" - - # Comparison operator. - op: "ComparisonFilterOp" = betterproto.enum_field(1) - # Value to compare against. - value: "__core_v3__.RuntimeUInt32" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class StatusCodeFilter(betterproto.Message): - """Filters on HTTP response/status code.""" - - # Comparison. - comparison: "ComparisonFilter" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class DurationFilter(betterproto.Message): - """Filters on total request duration in milliseconds.""" - - # Comparison. - comparison: "ComparisonFilter" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class NotHealthCheckFilter(betterproto.Message): - """ - Filters for requests that are not health check requests. A health check - request is marked by the health check filter. - """ - - pass - - -@dataclass(eq=False, repr=False) -class TraceableFilter(betterproto.Message): - """ - Filters for requests that are traceable. See the tracing overview for more - information on how a request becomes traceable. - """ - - pass - - -@dataclass(eq=False, repr=False) -class RuntimeFilter(betterproto.Message): - """Filters for random sampling of requests.""" - - # Runtime key to get an optional overridden numerator for use in the - # *percent_sampled* field. If found in runtime, this value will replace the - # default numerator. - runtime_key: str = betterproto.string_field(1) - # The default sampling percentage. If not specified, defaults to 0% with - # denominator of 100. - percent_sampled: "___type_v3__.FractionalPercent" = betterproto.message_field(2) - # By default, sampling pivots on the header :ref:`x-request- - # id` being present. If - # :ref:`x-request-id` is present, - # the filter will consistently sample across multiple hosts based on the - # runtime key value and the value extracted from :ref:`x-request- - # id`. 
If it is missing, or - # *use_independent_randomness* is set to true, the filter will randomly - # sample based on the runtime key value alone. *use_independent_randomness* - # can be used for logging kill switches within complex nested :ref:`AndFilter - # ` and :ref:`OrFilter - # ` blocks that are easier to - # reason about from a probability perspective (i.e., setting to true will - # cause the filter to behave like an independent random variable when - # composed within logical operator filters). - use_independent_randomness: bool = betterproto.bool_field(3) - - -@dataclass(eq=False, repr=False) -class AndFilter(betterproto.Message): - """ - Performs a logical “and” operation on the result of each filter in filters. - Filters are evaluated sequentially and if one of them returns false, the - filter returns false immediately. - """ - - filters: List["AccessLogFilter"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class OrFilter(betterproto.Message): - """ - Performs a logical “or” operation on the result of each individual filter. - Filters are evaluated sequentially and if one of them returns true, the - filter returns true immediately. - """ - - filters: List["AccessLogFilter"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class HeaderFilter(betterproto.Message): - """Filters requests based on the presence or value of a request header.""" - - # Only requests with a header which matches the specified HeaderMatcher will - # pass the filter check. - header: "__route_v3__.HeaderMatcher" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ResponseFlagFilter(betterproto.Message): - """ - Filters requests that received responses with an Envoy response flag set. A - list of the response flags can be found in the access log formatter - :ref:`documentation`. - """ - - # Only responses with any of the flags listed in this field will be - # logged. This field is optional. If it is not specified, then any response - # flag will pass the filter check. - flags: List[str] = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class GrpcStatusFilter(betterproto.Message): - """ - Filters gRPC requests based on their response status. If a gRPC status is - not provided, the filter will infer the status from the HTTP status code. - """ - - # Logs only responses that have any one of the gRPC statuses in this field. - statuses: List["GrpcStatusFilterStatus"] = betterproto.enum_field(1) - # If included and set to true, the filter will instead block all responses - # with a gRPC status or inferred gRPC status enumerated in statuses, and - # allow all other responses. - exclude: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class MetadataFilter(betterproto.Message): - """ - Filters based on matching dynamic metadata. If the matcher path and key - correspond to an existing key in dynamic metadata, the request is logged - only if the matcher value is equal to the metadata value. If the matcher - path and key *do not* correspond to an existing key in dynamic metadata, - the request is logged only if match_if_key_not_found is "true" or unset. - """ - - # Matcher to check metadata for specified value. For example, to match on the - # access_log_hint metadata, set the filter to "envoy.common" and the path to - # "access_log_hint", and the value to "true".
- matcher: "___type_matcher_v3__.MetadataMatcher" = betterproto.message_field(1) - # Default result if the key does not exist in dynamic metadata: if unset or - # true, then log; if false, then don't log. - match_if_key_not_found: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) - - -@dataclass(eq=False, repr=False) -class ExtensionFilter(betterproto.Message): - """Extension filter is statically registered at runtime.""" - - # The name of the filter implementation to instantiate. The name must match a - # statically registered filter. - name: str = betterproto.string_field(1) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - -from ....type import v3 as ___type_v3__ -from ....type.matcher import v3 as ___type_matcher_v3__ -from ...core import v3 as __core_v3__ -from ...route import v3 as __route_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/bootstrap/__init__.py b/src/envoy_data_plane/envoy/config/bootstrap/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/bootstrap/v2/__init__.py b/src/envoy_data_plane/envoy/config/bootstrap/v2/__init__.py deleted file mode 100644 index 1473d83..0000000 --- a/src/envoy_data_plane/envoy/config/bootstrap/v2/__init__.py +++ /dev/null @@ -1,333 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/bootstrap/v2/bootstrap.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Bootstrap(betterproto.Message): - """ - Bootstrap :ref:`configuration overview `. - [#next-free-field: 21] - """ - - # Node identity to present to the management server and for instance - # identification purposes (e.g. in generated headers). - node: "___api_v2_core__.Node" = betterproto.message_field(1) - # Statically specified resources. - static_resources: "BootstrapStaticResources" = betterproto.message_field(2) - # xDS configuration sources. - dynamic_resources: "BootstrapDynamicResources" = betterproto.message_field(3) - # Configuration for the cluster manager which owns all upstream clusters - # within the server. - cluster_manager: "ClusterManager" = betterproto.message_field(4) - # Health discovery service config option. (:ref:`core.ApiConfigSource - # `) - hds_config: "___api_v2_core__.ApiConfigSource" = betterproto.message_field(14) - # Optional file system path to search for startup flag files. - flags_path: str = betterproto.string_field(5) - # Optional set of stats sinks. - stats_sinks: List["__metrics_v2__.StatsSink"] = betterproto.message_field(6) - # Configuration for internal processing of stats. - stats_config: "__metrics_v2__.StatsConfig" = betterproto.message_field(13) - # Optional duration between flushes to configured stats sinks. For - # performance reasons Envoy latches counters and only flushes counters and - # gauges at a periodic interval. If not specified the default is 5000ms (5 - # seconds). Duration must be at least 1ms and at most 5 min. - stats_flush_interval: timedelta = betterproto.message_field(7) - # Optional watchdog configuration. - watchdog: "Watchdog" = betterproto.message_field(8) - # Configuration for an external tracing provider. .. 
attention:: This field - # has been deprecated in favor of - # :ref:`HttpConnectionManager.Tracing.provider `. - tracing: "__trace_v2__.Tracing" = betterproto.message_field(9) - # Configuration for the runtime configuration provider (deprecated). If not - # specified, a “null” provider will be used which will result in all defaults - # being used. - runtime: "Runtime" = betterproto.message_field(11) - # Configuration for the runtime configuration provider. If not specified, a - # “null” provider will be used which will result in all defaults being used. - layered_runtime: "LayeredRuntime" = betterproto.message_field(17) - # Configuration for the local administration HTTP server. - admin: "Admin" = betterproto.message_field(12) - # Optional overload manager configuration. - overload_manager: "__overload_v2_alpha__.OverloadManager" = ( - betterproto.message_field(15) - ) - # Enable :ref:`stats for event dispatcher `, defaults - # to false. Note that this records a value for each iteration of the event - # loop on every thread. This should normally be minimal overhead, but when - # using :ref:`statsd `, it will - # send each observed value over the wire individually because the statsd - # protocol doesn't have any way to represent a histogram summary. Be aware - # that this can be a very large volume of data. - enable_dispatcher_stats: bool = betterproto.bool_field(16) - # Optional string which will be used in lieu of x-envoy in prefixing headers. - # For example, if this string is present and set to X-Foo, then x-envoy- - # retry-on will be transformed into x-foo-retry-on etc. Note this applies to - # the headers Envoy will generate, the headers Envoy will sanitize, and the - # headers Envoy will trust for core code and core extensions only. Be VERY - # careful making changes to this string, especially in multi-layer Envoy - # deployments or deployments using extensions which are not upstream. - header_prefix: str = betterproto.string_field(18) - # Optional proxy version which will be used to set the value of - # :ref:`server.version statistic ` if specified. Envoy - # will not process this value, it will be sent as is to :ref:`stats sinks - # `. - stats_server_version_override: Optional[int] = betterproto.message_field( - 19, wraps=betterproto.TYPE_UINT64 - ) - # Always use TCP queries instead of UDP queries for DNS lookups. This may be - # overridden on a per-cluster basis in cds_config, when :ref:`dns_resolvers - # ` and :ref:`use_tcp_for_dns_lookups - # ` are specified. Setting - # this value causes failure if the - # ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is - # true during server startup. Apple's API only uses UDP for DNS resolution. - use_tcp_for_dns_lookups: bool = betterproto.bool_field(20) - - def __post_init__(self) -> None: - super().__post_init__() - if self.runtime: - warnings.warn("Bootstrap.runtime is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class BootstrapStaticResources(betterproto.Message): - # Static :ref:`Listeners `. These listeners are - # available regardless of LDS configuration. - listeners: List["___api_v2__.Listener"] = betterproto.message_field(1) - # If a network based configuration source is specified for :ref:`cds_config < - # envoy_api_field_config.bootstrap.v2.Bootstrap.DynamicResources.cds_config>` - # , it's necessary to have some initial cluster definitions available to - # allow Envoy to know how to speak to the management server. These cluster - # definitions may not use :ref:`EDS ` (i.e.
- # they should be static IP or DNS-based). - clusters: List["___api_v2__.Cluster"] = betterproto.message_field(2) - # These static secrets can be used by :ref:`SdsSecretConfig - # ` - secrets: List["___api_v2_auth__.Secret"] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class BootstrapDynamicResources(betterproto.Message): - # All :ref:`Listeners ` are provided by a single - # :ref:`LDS ` configuration source. - lds_config: "___api_v2_core__.ConfigSource" = betterproto.message_field(1) - # All post-bootstrap :ref:`Cluster ` definitions are - # provided by a single :ref:`CDS ` - # configuration source. - cds_config: "___api_v2_core__.ConfigSource" = betterproto.message_field(2) - # A single :ref:`ADS ` source may be optionally - # specified. This must have :ref:`api_type - # ` :ref:`GRPC - # `. Only - # :ref:`ConfigSources ` that have the - # :ref:`ads ` field set will be - # streamed on the ADS channel. - ads_config: "___api_v2_core__.ApiConfigSource" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class Admin(betterproto.Message): - """ - Administration interface :ref:`operations documentation - `. - """ - - # The path to write the access log for the administration server. If no - # access log is desired specify ‘/dev/null’. This is only required if - # :ref:`address ` is set. - access_log_path: str = betterproto.string_field(1) - # The cpu profiler output path for the administration server. If no profile - # path is specified, the default is ‘/var/log/envoy/envoy.prof’. - profile_path: str = betterproto.string_field(2) - # The TCP address that the administration server will listen on. If not - # specified, Envoy will not start an administration server. - address: "___api_v2_core__.Address" = betterproto.message_field(3) - # Additional socket options that may not be present in Envoy source code or - # precompiled binaries. - socket_options: List["___api_v2_core__.SocketOption"] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class ClusterManager(betterproto.Message): - """ - Cluster manager :ref:`architecture overview - `. - """ - - # Name of the local cluster (i.e., the cluster that owns the Envoy running - # this configuration). In order to enable :ref:`zone aware routing - # ` this option must be set. - # If *local_cluster_name* is defined then :ref:`clusters - # ` must be defined in the :ref:`Bootstrap static - # cluster resources - # `. - # This is unrelated to the :option:`--service-cluster` option which does not - # `affect zone aware routing - # `_. - local_cluster_name: str = betterproto.string_field(1) - # Optional global configuration for outlier detection. - outlier_detection: "ClusterManagerOutlierDetection" = betterproto.message_field(2) - # Optional configuration used to bind newly established upstream connections. - # This may be overridden on a per-cluster basis by upstream_bind_config in - # the cds_config. - upstream_bind_config: "___api_v2_core__.BindConfig" = betterproto.message_field(3) - # A management server endpoint to stream load stats to via *StreamLoadStats*. - # This must have :ref:`api_type - # ` :ref:`GRPC - # `. - load_stats_config: "___api_v2_core__.ApiConfigSource" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class ClusterManagerOutlierDetection(betterproto.Message): - # Specifies the path to the outlier event log. - event_log_path: str = betterproto.string_field(1) - # [#not-implemented-hide:] The gRPC service for the outlier detection event - # service. 
If empty, outlier detection events won't be sent to a remote - # endpoint. - event_service: "___api_v2_core__.EventServiceConfig" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Watchdog(betterproto.Message): - """ - Envoy process watchdog configuration. When configured, this monitors for - nonresponsive threads and kills the process after the configured - thresholds. See the :ref:`watchdog documentation - ` for more information. - """ - - # The duration after which Envoy counts a nonresponsive thread in the - # *watchdog_miss* statistic. If not specified the default is 200ms. - miss_timeout: timedelta = betterproto.message_field(1) - # The duration after which Envoy counts a nonresponsive thread in the - # *watchdog_mega_miss* statistic. If not specified the default is 1000ms. - megamiss_timeout: timedelta = betterproto.message_field(2) - # If a watched thread has been nonresponsive for this duration, assume a - # programming error and kill the entire Envoy process. Set to 0 to disable - # kill behavior. If not specified the default is 0 (disabled). - kill_timeout: timedelta = betterproto.message_field(3) - # If at least two watched threads have been nonresponsive for at least this - # duration assume a true deadlock and kill the entire Envoy process. Set to 0 - # to disable this behavior. If not specified the default is 0 (disabled). - multikill_timeout: timedelta = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class Runtime(betterproto.Message): - """Runtime :ref:`configuration overview ` (deprecated).""" - - # The implementation assumes that the file system tree is accessed via a - # symbolic link. An atomic link swap is used when a new tree should be - # switched to. This parameter specifies the path to the symbolic link. Envoy - # will watch the location for changes and reload the file system tree when - # they happen. If this parameter is not set, there will be no disk based - # runtime. - symlink_root: str = betterproto.string_field(1) - # Specifies the subdirectory to load within the root directory. This is - # useful if multiple systems share the same delivery mechanism. Envoy - # configuration elements can be contained in a dedicated subdirectory. - subdirectory: str = betterproto.string_field(2) - # Specifies an optional subdirectory to load within the root directory. If - # specified and the directory exists, configuration values within this - # directory will override those found in the primary subdirectory. This is - # useful when Envoy is deployed across many different types of servers. - # Sometimes it is useful to have a per service cluster directory for runtime - # configuration. See below for exactly how the override directory is used. - override_subdirectory: str = betterproto.string_field(3) - # Static base runtime. This will be :ref:`overridden - # ` by other runtime layers, e.g. disk or admin. - # This follows the :ref:`runtime protobuf JSON representation encoding - # `. - base: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class RuntimeLayer(betterproto.Message): - """[#next-free-field: 6]""" - - # Descriptive name for the runtime layer. This is only used for the runtime - # :http:get:`/runtime` output. - name: str = betterproto.string_field(1) - # :ref:`Static runtime ` layer. This follows the - # :ref:`runtime protobuf JSON representation encoding - # `. 
Unlike static xDS resources, this static - # layer is overridable by later layers in the runtime virtual filesystem. - static_layer: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="layer_specifier" - ) - disk_layer: "RuntimeLayerDiskLayer" = betterproto.message_field( - 3, group="layer_specifier" - ) - admin_layer: "RuntimeLayerAdminLayer" = betterproto.message_field( - 4, group="layer_specifier" - ) - rtds_layer: "RuntimeLayerRtdsLayer" = betterproto.message_field( - 5, group="layer_specifier" - ) - - -@dataclass(eq=False, repr=False) -class RuntimeLayerDiskLayer(betterproto.Message): - """:ref:`Disk runtime ` layer.""" - - # The implementation assumes that the file system tree is accessed via a - # symbolic link. An atomic link swap is used when a new tree should be - # switched to. This parameter specifies the path to the symbolic link. Envoy - # will watch the location for changes and reload the file system tree when - # they happen. See documentation on runtime :ref:`atomicity - # ` for further details on how reloads are treated. - symlink_root: str = betterproto.string_field(1) - # Specifies the subdirectory to load within the root directory. This is - # useful if multiple systems share the same delivery mechanism. Envoy - # configuration elements can be contained in a dedicated subdirectory. - subdirectory: str = betterproto.string_field(3) - # :ref:`Append ` the - # service cluster to the path under symlink root. - append_service_cluster: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class RuntimeLayerAdminLayer(betterproto.Message): - """:ref:`Admin console runtime ` layer.""" - - pass - - -@dataclass(eq=False, repr=False) -class RuntimeLayerRtdsLayer(betterproto.Message): - """:ref:`Runtime Discovery Service (RTDS) ` layer.""" - - # Resource to subscribe to at *rtds_config* for the RTDS layer. - name: str = betterproto.string_field(1) - # RTDS configuration source. - rtds_config: "___api_v2_core__.ConfigSource" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class LayeredRuntime(betterproto.Message): - """Runtime :ref:`configuration overview `.""" - - # The :ref:`layers ` of the runtime. This is ordered - # such that later layers in the list overlay earlier entries. - layers: List["RuntimeLayer"] = betterproto.message_field(1) - - -from ....api import v2 as ___api_v2__ -from ....api.v2 import auth as ___api_v2_auth__ -from ....api.v2 import core as ___api_v2_core__ -from ...metrics import v2 as __metrics_v2__ -from ...overload import v2alpha as __overload_v2_alpha__ -from ...trace import v2 as __trace_v2__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/bootstrap/v3/__init__.py b/src/envoy_data_plane/envoy/config/bootstrap/v3/__init__.py deleted file mode 100644 index 7ffbabb..0000000 --- a/src/envoy_data_plane/envoy/config/bootstrap/v3/__init__.py +++ /dev/null @@ -1,555 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/config/bootstrap/v3/bootstrap.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import Dict, List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class WatchdogWatchdogActionWatchdogEvent(betterproto.Enum): - UNKNOWN = 0 - KILL = 1 - MULTIKILL = 2 - MEGAMISS = 3 - MISS = 4 - - -class CustomInlineHeaderInlineHeaderType(betterproto.Enum): - REQUEST_HEADER = 0 - REQUEST_TRAILER = 1 - RESPONSE_HEADER = 2 - RESPONSE_TRAILER = 3 - - -@dataclass(eq=False, repr=False) -class Bootstrap(betterproto.Message): - """ - Bootstrap :ref:`configuration overview `. - [#next-free-field: 34] - """ - - # Node identity to present to the management server and for instance - # identification purposes (e.g. in generated headers). - node: "__core_v3__.Node" = betterproto.message_field(1) - # A list of :ref:`Node ` field names - # that will be included in the context parameters of the effective xdstp:// - # URL that is sent in a discovery request when resource locators are used for - # LDS/CDS. Any non-string field will have its JSON encoding set as the - # context parameter value, with the exception of metadata, which will be - # flattened (see example below). The supported field names are: - "cluster" - - # "id" - "locality.region" - "locality.sub_zone" - "locality.zone" - - # "metadata" - "user_agent_build_version.metadata" - - # "user_agent_build_version.version" - "user_agent_name" - - # "user_agent_version" The node context parameters act as a base layer - # dictionary for the context parameters (i.e. more specific resource specific - # context parameters will override). Field names will be prefixed with - # “udpa.node.” when included in context parameters. For example, if - # node_context_params is ``["user_agent_name", "metadata"]``, the implied - # context parameters might be:: node.user_agent_name: "envoy" - # node.metadata.foo: "{\"bar\": \"baz\"}" node.metadata.some: "42" - # node.metadata.thing: "\"thing\"" [#not-implemented-hide:] - node_context_params: List[str] = betterproto.string_field(26) - # Statically specified resources. - static_resources: "BootstrapStaticResources" = betterproto.message_field(2) - # xDS configuration sources. - dynamic_resources: "BootstrapDynamicResources" = betterproto.message_field(3) - # Configuration for the cluster manager which owns all upstream clusters - # within the server. - cluster_manager: "ClusterManager" = betterproto.message_field(4) - # Health discovery service config option. (:ref:`core.ApiConfigSource - # `) - hds_config: "__core_v3__.ApiConfigSource" = betterproto.message_field(14) - # Optional file system path to search for startup flag files. - flags_path: str = betterproto.string_field(5) - # Optional set of stats sinks. - stats_sinks: List["__metrics_v3__.StatsSink"] = betterproto.message_field(6) - # Configuration for internal processing of stats. - stats_config: "__metrics_v3__.StatsConfig" = betterproto.message_field(13) - # Optional duration between flushes to configured stats sinks. For - # performance reasons Envoy latches counters and only flushes counters and - # gauges at a periodic interval. If not specified the default is 5000ms (5 - # seconds). Only one of `stats_flush_interval` or `stats_flush_on_admin` can - # be set. Duration must be at least 1ms and at most 5 min. 
- stats_flush_interval: timedelta = betterproto.message_field(7) - # Flush stats to sinks only when queried for on the admin interface. If set, - # a flush timer is not created. Only one of `stats_flush_on_admin` or - # `stats_flush_interval` can be set. - stats_flush_on_admin: bool = betterproto.bool_field(29, group="stats_flush") - # Optional watchdog configuration. This is for a single watchdog - # configuration for the entire system. Deprecated in favor of *watchdogs* - # which has finer granularity. - watchdog: "Watchdog" = betterproto.message_field(8) - # Optional watchdogs configuration. This is used for specifying different - # watchdogs for the different subsystems. [#extension-category: - # envoy.guarddog_actions] - watchdogs: "Watchdogs" = betterproto.message_field(27) - # Configuration for an external tracing provider. .. attention:: This field - # has been deprecated in favor of - # :ref:`HttpConnectionManager.Tracing.provider `. - tracing: "__trace_v3__.Tracing" = betterproto.message_field(9) - # Configuration for the runtime configuration provider. If not specified, a - # “null” provider will be used which will result in all defaults being used. - layered_runtime: "LayeredRuntime" = betterproto.message_field(17) - # Configuration for the local administration HTTP server. - admin: "Admin" = betterproto.message_field(12) - # Optional overload manager configuration. - overload_manager: "__overload_v3__.OverloadManager" = betterproto.message_field(15) - # Enable :ref:`stats for event dispatcher `, defaults - # to false. Note that this records a value for each iteration of the event - # loop on every thread. This should normally be minimal overhead, but when - # using :ref:`statsd `, it - # will send each observed value over the wire individually because the statsd - # protocol doesn't have any way to represent a histogram summary. Be aware - # that this can be a very large volume of data. - enable_dispatcher_stats: bool = betterproto.bool_field(16) - # Optional string which will be used in lieu of x-envoy in prefixing headers. - # For example, if this string is present and set to X-Foo, then x-envoy- - # retry-on will be transformed into x-foo-retry-on etc. Note this applies to - # the headers Envoy will generate, the headers Envoy will sanitize, and the - # headers Envoy will trust for core code and core extensions only. Be VERY - # careful making changes to this string, especially in multi-layer Envoy - # deployments or deployments using extensions which are not upstream. - header_prefix: str = betterproto.string_field(18) - # Optional proxy version which will be used to set the value of - # :ref:`server.version statistic ` if specified. Envoy - # will not process this value, it will be sent as is to :ref:`stats sinks - # `. - stats_server_version_override: Optional[int] = betterproto.message_field( - 19, wraps=betterproto.TYPE_UINT64 - ) - # Always use TCP queries instead of UDP queries for DNS lookups. This may be - # overridden on a per-cluster basis in cds_config, when :ref:`dns_resolvers - # ` and - # :ref:`use_tcp_for_dns_lookups - # ` are - # specified. This field is deprecated in favor of *dns_resolution_config* - # which aggregates all of the DNS resolver configuration in a single message. - use_tcp_for_dns_lookups: bool = betterproto.bool_field(20) - # DNS resolution configuration which includes the underlying dns resolver - # addresses and options. 
This may be overridden on a per-cluster basis in - # cds_config, when :ref:`dns_resolution_config - # ` is - # specified. This field is deprecated in favor of - # :ref:`typed_dns_resolver_config `. - dns_resolution_config: "__core_v3__.DnsResolutionConfig" = ( - betterproto.message_field(30) - ) - # DNS resolver type configuration extension. This extension can be used to - # configure c-ares, apple, or any other DNS resolver types and the related - # parameters. For example, an object of :ref:`CaresDnsResolverConfig ` - # can be packed into this *typed_dns_resolver_config*. This configuration - # replaces the :ref:`dns_resolution_config - # ` - # configuration. During the transition period when both - # *dns_resolution_config* and *typed_dns_resolver_config* exists, when - # *typed_dns_resolver_config* is in place, Envoy will use it and ignore - # *dns_resolution_config*. When *typed_dns_resolver_config* is missing, the - # default behavior is in place. [#extension-category: - # envoy.network.dns_resolver] - typed_dns_resolver_config: "__core_v3__.TypedExtensionConfig" = ( - betterproto.message_field(31) - ) - # Specifies optional bootstrap extensions to be instantiated at startup time. - # Each item contains extension specific configuration. [#extension-category: - # envoy.bootstrap] - bootstrap_extensions: List[ - "__core_v3__.TypedExtensionConfig" - ] = betterproto.message_field(21) - # Specifies optional extensions instantiated at startup time and invoked - # during crash time on the request that caused the crash. - fatal_actions: List["FatalAction"] = betterproto.message_field(28) - # Configuration sources that will participate in xdstp:// URL authority - # resolution. The algorithm is as follows: 1. The authority field is taken - # from the xdstp:// URL, call this *resource_authority*. 2. - # *resource_authority* is compared against the authorities in any peer - # *ConfigSource*. The peer *ConfigSource* is the configuration source - # message which would have been used unconditionally for resolution with - # opaque resource names. If there is a match with an authority, the peer - # *ConfigSource* message is used. 3. *resource_authority* is compared - # sequentially with the authorities in each configuration source in - # *config_sources*. The first *ConfigSource* to match wins. 4. As a - # fallback, if no configuration source matches, then - # *default_config_source* is used. 5. If *default_config_source* is not - # specified, resolution fails. [#not-implemented-hide:] - config_sources: List["__core_v3__.ConfigSource"] = betterproto.message_field(22) - # Default configuration source for xdstp:// URLs if all other resolution - # fails. [#not-implemented-hide:] - default_config_source: "__core_v3__.ConfigSource" = betterproto.message_field(23) - # Optional overriding of default socket interface. The value must be the name - # of one of the socket interface factories initialized through a bootstrap - # extension - default_socket_interface: str = betterproto.string_field(24) - # Global map of CertificateProvider instances. These instances are referred - # to by name in the - # :ref:`CommonTlsContext.CertificateProviderInstance.instance_name ` field. [#not-implemented-hide:] - certificate_provider_instances: Dict[ - str, "__core_v3__.TypedExtensionConfig" - ] = betterproto.map_field(25, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # Specifies a set of headers that need to be registered as inline header. 
- # This configuration allows users to customize the inline headers on-demand - # at Envoy startup without modifying Envoy's source code. Note that the 'set- - # cookie' header cannot be registered as inline header. - inline_headers: List["CustomInlineHeader"] = betterproto.message_field(32) - # Optional path to a file with performance tracing data created by "Perfetto" - # SDK in binary ProtoBuf format. The default value is "envoy.pftrace". - perf_tracing_file_path: str = betterproto.string_field(33) - - def __post_init__(self) -> None: - super().__post_init__() - if self.watchdog: - warnings.warn("Bootstrap.watchdog is deprecated", DeprecationWarning) - if self.tracing: - warnings.warn("Bootstrap.tracing is deprecated", DeprecationWarning) - if self.use_tcp_for_dns_lookups: - warnings.warn( - "Bootstrap.use_tcp_for_dns_lookups is deprecated", DeprecationWarning - ) - if self.dns_resolution_config: - warnings.warn( - "Bootstrap.dns_resolution_config is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class BootstrapStaticResources(betterproto.Message): - # Static :ref:`Listeners `. - # These listeners are available regardless of LDS configuration. - listeners: List["__listener_v3__.Listener"] = betterproto.message_field(1) - # If a network based configuration source is specified for :ref:`cds_config < - # envoy_v3_api_field_config.bootstrap.v3.Bootstrap.DynamicResources.cds_confi - # g>`, it's necessary to have some initial cluster definitions available to - # allow Envoy to know how to speak to the management server. These cluster - # definitions may not use :ref:`EDS ` (i.e. - # they should be static IP or DNS-based). - clusters: List["__cluster_v3__.Cluster"] = betterproto.message_field(2) - # These static secrets can be used by :ref:`SdsSecretConfig - # ` - secrets: List[ - "___extensions_transport_sockets_tls_v3__.Secret" - ] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class BootstrapDynamicResources(betterproto.Message): - """[#next-free-field: 7]""" - - # All :ref:`Listeners ` are - # provided by a single :ref:`LDS ` - # configuration source. - lds_config: "__core_v3__.ConfigSource" = betterproto.message_field(1) - # xdstp:// resource locator for listener collection. [#not-implemented-hide:] - lds_resources_locator: str = betterproto.string_field(5) - # All post-bootstrap :ref:`Cluster - # ` definitions are provided by a - # single :ref:`CDS ` configuration source. - cds_config: "__core_v3__.ConfigSource" = betterproto.message_field(2) - # xdstp:// resource locator for cluster collection. [#not-implemented-hide:] - cds_resources_locator: str = betterproto.string_field(6) - # A single :ref:`ADS ` source may be optionally - # specified. This must have :ref:`api_type - # ` :ref:`GRPC - # `. - # Only :ref:`ConfigSources ` - # that have the :ref:`ads - # ` field set will be - # streamed on the ADS channel. - ads_config: "__core_v3__.ApiConfigSource" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class Admin(betterproto.Message): - """ - Administration interface :ref:`operations documentation - `. [#next-free-field: 7] - """ - - # Configuration for :ref:`access logs ` emitted by - # the administration server. - access_log: List["__accesslog_v3__.AccessLog"] = betterproto.message_field(5) - # The path to write the access log for the administration server. If no - # access log is desired specify ‘/dev/null’. This is only required if - # :ref:`address ` is - # set. 
Deprecated in favor of *access_log* which offers more options. - access_log_path: str = betterproto.string_field(1) - # The cpu profiler output path for the administration server. If no profile - # path is specified, the default is ‘/var/log/envoy/envoy.prof’. - profile_path: str = betterproto.string_field(2) - # The TCP address that the administration server will listen on. If not - # specified, Envoy will not start an administration server. - address: "__core_v3__.Address" = betterproto.message_field(3) - # Additional socket options that may not be present in Envoy source code or - # precompiled binaries. - socket_options: List["__core_v3__.SocketOption"] = betterproto.message_field(4) - # Indicates whether :ref:`global_downstream_max_connections - # ` should apply to the admin - # interface or not. - ignore_global_conn_limit: bool = betterproto.bool_field(6) - - def __post_init__(self) -> None: - super().__post_init__() - if self.access_log_path: - warnings.warn("Admin.access_log_path is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class ClusterManager(betterproto.Message): - """ - Cluster manager :ref:`architecture overview - `. - """ - - # Name of the local cluster (i.e., the cluster that owns the Envoy running - # this configuration). In order to enable :ref:`zone aware routing - # ` this option must be set. - # If *local_cluster_name* is defined then :ref:`clusters - # ` must be defined in the - # :ref:`Bootstrap static cluster resources `. This is unrelated to the - # :option:`--service-cluster` option which does not `affect zone aware - # routing `_. - local_cluster_name: str = betterproto.string_field(1) - # Optional global configuration for outlier detection. - outlier_detection: "ClusterManagerOutlierDetection" = betterproto.message_field(2) - # Optional configuration used to bind newly established upstream connections. - # This may be overridden on a per-cluster basis by upstream_bind_config in - # the cds_config. - upstream_bind_config: "__core_v3__.BindConfig" = betterproto.message_field(3) - # A management server endpoint to stream load stats to via *StreamLoadStats*. - # This must have :ref:`api_type - # ` :ref:`GRPC - # `. - load_stats_config: "__core_v3__.ApiConfigSource" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class ClusterManagerOutlierDetection(betterproto.Message): - # Specifies the path to the outlier event log. - event_log_path: str = betterproto.string_field(1) - # [#not-implemented-hide:] The gRPC service for the outlier detection event - # service. If empty, outlier detection events won't be sent to a remote - # endpoint. - event_service: "__core_v3__.EventServiceConfig" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Watchdogs(betterproto.Message): - """ - Allows you to specify different watchdog configs for different subsystems. - This allows finer tuned policies for the watchdog. If a subsystem is - omitted the default values for that system will be used. - """ - - # Watchdog for the main thread. - main_thread_watchdog: "Watchdog" = betterproto.message_field(1) - # Watchdog for the worker threads. - worker_watchdog: "Watchdog" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Watchdog(betterproto.Message): - """ - Envoy process watchdog configuration. When configured, this monitors for - nonresponsive threads and kills the process after the configured - thresholds. See the :ref:`watchdog documentation - ` for more information. 
[#next-free-field: - 8] - """ - - # Register actions that will fire on given WatchDog events. See - # *WatchDogAction* for priority of events. - actions: List["WatchdogWatchdogAction"] = betterproto.message_field(7) - # The duration after which Envoy counts a nonresponsive thread in the - # *watchdog_miss* statistic. If not specified the default is 200ms. - miss_timeout: timedelta = betterproto.message_field(1) - # The duration after which Envoy counts a nonresponsive thread in the - # *watchdog_mega_miss* statistic. If not specified the default is 1000ms. - megamiss_timeout: timedelta = betterproto.message_field(2) - # If a watched thread has been nonresponsive for this duration, assume a - # programming error and kill the entire Envoy process. Set to 0 to disable - # kill behavior. If not specified the default is 0 (disabled). - kill_timeout: timedelta = betterproto.message_field(3) - # Defines the maximum jitter used to adjust the *kill_timeout* if - # *kill_timeout* is enabled. Enabling this feature would help to reduce risk - # of synchronized watchdog kill events across proxies due to external - # triggers. Set to 0 to disable. If not specified the default is 0 - # (disabled). - max_kill_timeout_jitter: timedelta = betterproto.message_field(6) - # If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) - # threads have been nonresponsive for at least this duration kill the entire - # Envoy process. Set to 0 to disable this behavior. If not specified the - # default is 0 (disabled). - multikill_timeout: timedelta = betterproto.message_field(4) - # Sets the threshold for *multikill_timeout* in terms of the percentage of - # nonresponsive threads required for the *multikill_timeout*. If not - # specified the default is 0. - multikill_threshold: "___type_v3__.Percent" = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class WatchdogWatchdogAction(betterproto.Message): - # Extension specific configuration for the action. - config: "__core_v3__.TypedExtensionConfig" = betterproto.message_field(1) - event: "WatchdogWatchdogActionWatchdogEvent" = betterproto.enum_field(2) - - -@dataclass(eq=False, repr=False) -class FatalAction(betterproto.Message): - """ - Fatal actions to run while crashing. Actions can be safe (meaning they are - async-signal safe) or unsafe. We run all safe actions before we run unsafe - actions. If using an unsafe action that could get stuck or deadlock, it - important to have an out of band system to terminate the process. The - interface for the extension is - ``Envoy::Server::Configuration::FatalAction``. *FatalAction* extensions - live in the ``envoy.extensions.fatal_actions`` API namespace. - """ - - # Extension specific configuration for the action. It's expected to conform - # to the ``Envoy::Server::Configuration::FatalAction`` interface. - config: "__core_v3__.TypedExtensionConfig" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class Runtime(betterproto.Message): - """Runtime :ref:`configuration overview ` (deprecated).""" - - # The implementation assumes that the file system tree is accessed via a - # symbolic link. An atomic link swap is used when a new tree should be - # switched to. This parameter specifies the path to the symbolic link. Envoy - # will watch the location for changes and reload the file system tree when - # they happen. If this parameter is not set, there will be no disk based - # runtime. 
- symlink_root: str = betterproto.string_field(1) - # Specifies the subdirectory to load within the root directory. This is - # useful if multiple systems share the same delivery mechanism. Envoy - # configuration elements can be contained in a dedicated subdirectory. - subdirectory: str = betterproto.string_field(2) - # Specifies an optional subdirectory to load within the root directory. If - # specified and the directory exists, configuration values within this - # directory will override those found in the primary subdirectory. This is - # useful when Envoy is deployed across many different types of servers. - # Sometimes it is useful to have a per service cluster directory for runtime - # configuration. See below for exactly how the override directory is used. - override_subdirectory: str = betterproto.string_field(3) - # Static base runtime. This will be :ref:`overridden - # ` by other runtime layers, e.g. disk or admin. - # This follows the :ref:`runtime protobuf JSON representation encoding - # `. - base: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class RuntimeLayer(betterproto.Message): - """[#next-free-field: 6]""" - - # Descriptive name for the runtime layer. This is only used for the runtime - # :http:get:`/runtime` output. - name: str = betterproto.string_field(1) - # :ref:`Static runtime ` layer. This follows the - # :ref:`runtime protobuf JSON representation encoding - # `. Unlike static xDS resources, this static - # layer is overridable by later layers in the runtime virtual filesystem. - static_layer: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="layer_specifier" - ) - disk_layer: "RuntimeLayerDiskLayer" = betterproto.message_field( - 3, group="layer_specifier" - ) - admin_layer: "RuntimeLayerAdminLayer" = betterproto.message_field( - 4, group="layer_specifier" - ) - rtds_layer: "RuntimeLayerRtdsLayer" = betterproto.message_field( - 5, group="layer_specifier" - ) - - -@dataclass(eq=False, repr=False) -class RuntimeLayerDiskLayer(betterproto.Message): - """:ref:`Disk runtime ` layer.""" - - # The implementation assumes that the file system tree is accessed via a - # symbolic link. An atomic link swap is used when a new tree should be - # switched to. This parameter specifies the path to the symbolic link. Envoy - # will watch the location for changes and reload the file system tree when - # they happen. See documentation on runtime :ref:`atomicity - # ` for further details on how reloads are treated. - symlink_root: str = betterproto.string_field(1) - # Specifies the subdirectory to load within the root directory. This is - # useful if multiple systems share the same delivery mechanism. Envoy - # configuration elements can be contained in a dedicated subdirectory. - subdirectory: str = betterproto.string_field(3) - # :ref:`Append ` the - # service cluster to the path under symlink root. - append_service_cluster: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class RuntimeLayerAdminLayer(betterproto.Message): - """:ref:`Admin console runtime ` layer.""" - - pass - - -@dataclass(eq=False, repr=False) -class RuntimeLayerRtdsLayer(betterproto.Message): - """:ref:`Runtime Discovery Service (RTDS) ` layer.""" - - # Resource to subscribe to at *rtds_config* for the RTDS layer. - name: str = betterproto.string_field(1) - # RTDS configuration source. 
- rtds_config: "__core_v3__.ConfigSource" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class LayeredRuntime(betterproto.Message): - """Runtime :ref:`configuration overview `.""" - - # The :ref:`layers ` of the runtime. This is ordered - # such that later layers in the list overlay earlier entries. - layers: List["RuntimeLayer"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class CustomInlineHeader(betterproto.Message): - """ - Used to specify the header that needs to be registered as an inline header. - If request or response contain multiple headers with the same name and the - header name is registered as an inline header. Then multiple headers will - be folded into one, and multiple header values will be concatenated by a - suitable delimiter. The delimiter is generally a comma. For example, if - 'foo' is registered as an inline header, and the headers contains the - following two headers: .. code-block:: text foo: bar foo: eep Then they - will eventually be folded into: .. code-block:: text foo: bar, eep Inline - headers provide O(1) search performance, but each inline header imposes an - additional memory overhead on all instances of the corresponding type of - HeaderMap or TrailerMap. - """ - - # The name of the header that is expected to be set as the inline header. - inline_header_name: str = betterproto.string_field(1) - # The type of the header that is expected to be set as the inline header. - inline_header_type: "CustomInlineHeaderInlineHeaderType" = betterproto.enum_field(2) - - -from ....extensions.transport_sockets.tls import ( - v3 as ___extensions_transport_sockets_tls_v3__, -) -from ....type import v3 as ___type_v3__ -from ...accesslog import v3 as __accesslog_v3__ -from ...cluster import v3 as __cluster_v3__ -from ...core import v3 as __core_v3__ -from ...listener import v3 as __listener_v3__ -from ...metrics import v3 as __metrics_v3__ -from ...overload import v3 as __overload_v3__ -from ...trace import v3 as __trace_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/cluster/__init__.py b/src/envoy_data_plane/envoy/config/cluster/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/cluster/aggregate/__init__.py b/src/envoy_data_plane/envoy/config/cluster/aggregate/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/cluster/aggregate/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/cluster/aggregate/v2alpha/__init__.py deleted file mode 100644 index f9cd9b3..0000000 --- a/src/envoy_data_plane/envoy/config/cluster/aggregate/v2alpha/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/cluster/aggregate/v2alpha/cluster.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ClusterConfig(betterproto.Message): - """ - Configuration for the aggregate cluster. See the :ref:`architecture - overview ` for more information. - [#extension: envoy.clusters.aggregate] - """ - - # Load balancing clusters in aggregate cluster. Clusters are prioritized - # based on the order they appear in this list. 
- clusters: List[str] = betterproto.string_field(1) diff --git a/src/envoy_data_plane/envoy/config/cluster/dynamic_forward_proxy/__init__.py b/src/envoy_data_plane/envoy/config/cluster/dynamic_forward_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/cluster/dynamic_forward_proxy/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/cluster/dynamic_forward_proxy/v2alpha/__init__.py deleted file mode 100644 index 6c666ae..0000000 --- a/src/envoy_data_plane/envoy/config/cluster/dynamic_forward_proxy/v2alpha/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ClusterConfig(betterproto.Message): - """ - Configuration for the dynamic forward proxy cluster. See the - :ref:`architecture overview ` for - more information. [#extension: envoy.clusters.dynamic_forward_proxy] - """ - - # The DNS cache configuration that the cluster will attach to. Note this - # configuration must match that of associated :ref:`dynamic forward proxy - # HTTP filter configuration `. - dns_cache_config: "___common_dynamic_forward_proxy_v2_alpha__.DnsCacheConfig" = ( - betterproto.message_field(1) - ) - - -from ....common.dynamic_forward_proxy import ( - v2alpha as ___common_dynamic_forward_proxy_v2_alpha__, -) diff --git a/src/envoy_data_plane/envoy/config/cluster/redis/__init__.py b/src/envoy_data_plane/envoy/config/cluster/redis/__init__.py deleted file mode 100644 index 75f4fee..0000000 --- a/src/envoy_data_plane/envoy/config/cluster/redis/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/cluster/redis/redis_cluster.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RedisClusterConfig(betterproto.Message): - """[#next-free-field: 7]""" - - # Interval between successive topology refresh requests. If not set, this - # defaults to 5s. - cluster_refresh_rate: timedelta = betterproto.message_field(1) - # Timeout for topology refresh request. If not set, this defaults to 3s. - cluster_refresh_timeout: timedelta = betterproto.message_field(2) - # The minimum interval that must pass after triggering a topology refresh - # request before a new request can possibly be triggered again. Any errors - # received during one of these time intervals are ignored. If not set, this - # defaults to 5s. - redirect_refresh_interval: timedelta = betterproto.message_field(3) - # The number of redirection errors that must be received before triggering a - # topology refresh request. If not set, this defaults to 5. If this is set to - # 0, topology refresh after redirect is disabled. - redirect_refresh_threshold: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # The number of failures that must be received before triggering a topology - # refresh request. If not set, this defaults to 0, which disables the - # topology refresh due to failure. 
- failure_refresh_threshold: int = betterproto.uint32_field(5) - # The number of hosts became degraded or unhealthy before triggering a - # topology refresh request. If not set, this defaults to 0, which disables - # the topology refresh due to degraded or unhealthy host. - host_degraded_refresh_threshold: int = betterproto.uint32_field(6) diff --git a/src/envoy_data_plane/envoy/config/cluster/v3/__init__.py b/src/envoy_data_plane/envoy/config/cluster/v3/__init__.py deleted file mode 100644 index e1fe3be..0000000 --- a/src/envoy_data_plane/envoy/config/cluster/v3/__init__.py +++ /dev/null @@ -1,1299 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/cluster/v3/circuit_breaker.proto, envoy/config/cluster/v3/cluster.proto, envoy/config/cluster/v3/filter.proto, envoy/config/cluster/v3/outlier_detection.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import Dict, List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class ClusterDiscoveryType(betterproto.Enum): - STATIC = 0 - STRICT_DNS = 1 - LOGICAL_DNS = 2 - EDS = 3 - ORIGINAL_DST = 4 - - -class ClusterLbPolicy(betterproto.Enum): - ROUND_ROBIN = 0 - LEAST_REQUEST = 1 - RING_HASH = 2 - RANDOM = 3 - MAGLEV = 5 - CLUSTER_PROVIDED = 6 - LOAD_BALANCING_POLICY_CONFIG = 7 - - -class ClusterDnsLookupFamily(betterproto.Enum): - AUTO = 0 - V4_ONLY = 1 - V6_ONLY = 2 - V4_PREFERRED = 3 - ALL = 4 - - -class ClusterClusterProtocolSelection(betterproto.Enum): - USE_CONFIGURED_PROTOCOL = 0 - USE_DOWNSTREAM_PROTOCOL = 1 - - -class ClusterLbSubsetConfigLbSubsetFallbackPolicy(betterproto.Enum): - NO_FALLBACK = 0 - ANY_ENDPOINT = 1 - DEFAULT_SUBSET = 2 - - -class ClusterLbSubsetConfigLbSubsetSelectorLbSubsetSelectorFallbackPolicy( - betterproto.Enum -): - NOT_DEFINED = 0 - NO_FALLBACK = 1 - ANY_ENDPOINT = 2 - DEFAULT_SUBSET = 3 - KEYS_SUBSET = 4 - - -class ClusterRingHashLbConfigHashFunction(betterproto.Enum): - XX_HASH = 0 - MURMUR_HASH_2 = 1 - - -@dataclass(eq=False, repr=False) -class Filter(betterproto.Message): - # The name of the filter to instantiate. The name must match a supported - # upstream filter. Note that Envoy's :ref:`downstream network filters - # ` are not valid upstream filters. - name: str = betterproto.string_field(1) - # Filter specific configuration which depends on the filter being - # instantiated. See the supported filters for further documentation. - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class OutlierDetection(betterproto.Message): - """ - See the :ref:`architecture overview ` for - more information on outlier detection. [#next-free-field: 22] - """ - - # The number of consecutive 5xx responses or local origin errors that are - # mapped to 5xx error codes before a consecutive 5xx ejection occurs. - # Defaults to 5. - consecutive_5_xx: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # The time interval between ejection analysis sweeps. This can result in both - # new ejections as well as hosts being returned to service. Defaults to - # 10000ms or 10s. - interval: timedelta = betterproto.message_field(2) - # The base time that a host is ejected for. The real time is equal to the - # base time multiplied by the number of times the host has been ejected and - # is capped by :ref:`max_ejection_time`. Defaults to 30000ms or 30s. 
- base_ejection_time: timedelta = betterproto.message_field(3) - # The maximum % of an upstream cluster that can be ejected due to outlier - # detection. Defaults to 10% but will eject at least one host regardless of - # the value. - max_ejection_percent: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # The % chance that a host will be actually ejected when an outlier status is - # detected through consecutive 5xx. This setting can be used to disable - # ejection or to ramp it up slowly. Defaults to 100. - enforcing_consecutive_5_xx: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) - # The % chance that a host will be actually ejected when an outlier status is - # detected through success rate statistics. This setting can be used to - # disable ejection or to ramp it up slowly. Defaults to 100. - enforcing_success_rate: Optional[int] = betterproto.message_field( - 6, wraps=betterproto.TYPE_UINT32 - ) - # The number of hosts in a cluster that must have enough request volume to - # detect success rate outliers. If the number of hosts is less than this - # setting, outlier detection via success rate statistics is not performed for - # any host in the cluster. Defaults to 5. - success_rate_minimum_hosts: Optional[int] = betterproto.message_field( - 7, wraps=betterproto.TYPE_UINT32 - ) - # The minimum number of total requests that must be collected in one interval - # (as defined by the interval duration above) to include this host in success - # rate based outlier detection. If the volume is lower than this setting, - # outlier detection via success rate statistics is not performed for that - # host. Defaults to 100. - success_rate_request_volume: Optional[int] = betterproto.message_field( - 8, wraps=betterproto.TYPE_UINT32 - ) - # This factor is used to determine the ejection threshold for success rate - # outlier ejection. The ejection threshold is the difference between the mean - # success rate, and the product of this factor and the standard deviation of - # the mean success rate: mean - (stdev * success_rate_stdev_factor). This - # factor is divided by a thousand to get a double. That is, if the desired - # factor is 1.9, the runtime value should be 1900. Defaults to 1900. - success_rate_stdev_factor: Optional[int] = betterproto.message_field( - 9, wraps=betterproto.TYPE_UINT32 - ) - # The number of consecutive gateway failures (502, 503, 504 status codes) - # before a consecutive gateway failure ejection occurs. Defaults to 5. - consecutive_gateway_failure: Optional[int] = betterproto.message_field( - 10, wraps=betterproto.TYPE_UINT32 - ) - # The % chance that a host will be actually ejected when an outlier status is - # detected through consecutive gateway failures. This setting can be used to - # disable ejection or to ramp it up slowly. Defaults to 0. - enforcing_consecutive_gateway_failure: Optional[int] = betterproto.message_field( - 11, wraps=betterproto.TYPE_UINT32 - ) - # Determines whether to distinguish local origin failures from external - # errors. If set to true the following configuration parameters are taken - # into account: :ref:`consecutive_local_origin_failure`, :ref:`e - # nforcing_consecutive_local_origin_failure` and :ref: - # `enforcing_local_origin_success_rate`. Defaults to false. - split_external_local_origin_errors: bool = betterproto.bool_field(12) - # The number of consecutive locally originated failures before ejection - # occurs. Defaults to 5. 
Parameter takes effect only when :ref:`split_externa - # l_local_origin_errors` is set to true. - consecutive_local_origin_failure: Optional[int] = betterproto.message_field( - 13, wraps=betterproto.TYPE_UINT32 - ) - # The % chance that a host will be actually ejected when an outlier status is - # detected through consecutive locally originated failures. This setting can - # be used to disable ejection or to ramp it up slowly. Defaults to 100. - # Parameter takes effect only when :ref:`split_external_local_origin_errors` is set to true. - enforcing_consecutive_local_origin_failure: Optional[ - int - ] = betterproto.message_field(14, wraps=betterproto.TYPE_UINT32) - # The % chance that a host will be actually ejected when an outlier status is - # detected through success rate statistics for locally originated errors. - # This setting can be used to disable ejection or to ramp it up slowly. - # Defaults to 100. Parameter takes effect only when :ref:`split_external_loca - # l_origin_errors` is set to true. - enforcing_local_origin_success_rate: Optional[int] = betterproto.message_field( - 15, wraps=betterproto.TYPE_UINT32 - ) - # The failure percentage to use when determining failure percentage-based - # outlier detection. If the failure percentage of a given host is greater - # than or equal to this value, it will be ejected. Defaults to 85. - failure_percentage_threshold: Optional[int] = betterproto.message_field( - 16, wraps=betterproto.TYPE_UINT32 - ) - # The % chance that a host will be actually ejected when an outlier status is - # detected through failure percentage statistics. This setting can be used to - # disable ejection or to ramp it up slowly. Defaults to 0. [#next-major- - # version: setting this without setting failure_percentage_threshold should - # be invalid in v4.] - enforcing_failure_percentage: Optional[int] = betterproto.message_field( - 17, wraps=betterproto.TYPE_UINT32 - ) - # The % chance that a host will be actually ejected when an outlier status is - # detected through local-origin failure percentage statistics. This setting - # can be used to disable ejection or to ramp it up slowly. Defaults to 0. - enforcing_failure_percentage_local_origin: Optional[ - int - ] = betterproto.message_field(18, wraps=betterproto.TYPE_UINT32) - # The minimum number of hosts in a cluster in order to perform failure - # percentage-based ejection. If the total number of hosts in the cluster is - # less than this value, failure percentage-based ejection will not be - # performed. Defaults to 5. - failure_percentage_minimum_hosts: Optional[int] = betterproto.message_field( - 19, wraps=betterproto.TYPE_UINT32 - ) - # The minimum number of total requests that must be collected in one interval - # (as defined by the interval duration above) to perform failure percentage- - # based ejection for this host. If the volume is lower than this setting, - # failure percentage-based ejection will not be performed for this host. - # Defaults to 50. - failure_percentage_request_volume: Optional[int] = betterproto.message_field( - 20, wraps=betterproto.TYPE_UINT32 - ) - # The maximum time that a host is ejected for. See :ref:`base_ejection_time` - # for more information. If not specified, the default value (300000ms or - # 300s) or :ref:`base_ejection_time` value is applied, whatever is larger. 
- max_ejection_time: timedelta = betterproto.message_field(21) - - -@dataclass(eq=False, repr=False) -class CircuitBreakers(betterproto.Message): - """ - :ref:`Circuit breaking` settings can be - specified individually for each defined priority. - """ - - # If multiple :ref:`Thresholds` are defined with the same - # :ref:`RoutingPriority`, - # the first one in the list is used. If no Thresholds is defined for a given - # :ref:`RoutingPriority`, - # the default values are used. - thresholds: List["CircuitBreakersThresholds"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class CircuitBreakersThresholds(betterproto.Message): - """ - A Thresholds defines CircuitBreaker settings for a - :ref:`RoutingPriority`. - [#next-free-field: 9] - """ - - # The - # :ref:`RoutingPriority` - # the specified CircuitBreaker settings apply to. - priority: "__core_v3__.RoutingPriority" = betterproto.enum_field(1) - # The maximum number of connections that Envoy will make to the upstream - # cluster. If not specified, the default is 1024. - max_connections: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # The maximum number of pending requests that Envoy will allow to the - # upstream cluster. If not specified, the default is 1024. This limit is - # applied as a connection limit for non-HTTP traffic. - max_pending_requests: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - # The maximum number of parallel requests that Envoy will make to the - # upstream cluster. If not specified, the default is 1024. This limit does - # not apply to non-HTTP traffic. - max_requests: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # The maximum number of parallel retries that Envoy will allow to the - # upstream cluster. If not specified, the default is 3. - max_retries: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) - # Specifies a limit on concurrent retries in relation to the number of active - # requests. This parameter is optional. .. note:: If this field is set, - # the retry budget will override any configured retry circuit breaker. - retry_budget: "CircuitBreakersThresholdsRetryBudget" = betterproto.message_field(8) - # If track_remaining is true, then stats will be published that expose the - # number of resources remaining until the circuit breakers open. If not - # specified, the default is false. .. note:: If a retry budget is used in - # lieu of the max_retries circuit breaker, the remaining retry resources - # remaining will not be tracked. - track_remaining: bool = betterproto.bool_field(6) - # The maximum number of connection pools per cluster that Envoy will - # concurrently support at once. If not specified, the default is unlimited. - # Set this for clusters which create a large number of connection pools. See - # :ref:`Circuit Breaking - # ` for more - # details. - max_connection_pools: Optional[int] = betterproto.message_field( - 7, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class CircuitBreakersThresholdsRetryBudget(betterproto.Message): - # Specifies the limit on concurrent retries as a percentage of the sum of - # active requests and active pending requests. For example, if there are 100 - # active requests and the budget_percent is set to 25, there may be 25 active - # retries. This parameter is optional. Defaults to 20%. 
- budget_percent: "___type_v3__.Percent" = betterproto.message_field(1) - # Specifies the minimum retry concurrency allowed for the retry budget. The - # limit on the number of active retries may never go below this number. This - # parameter is optional. Defaults to 3. - min_retry_concurrency: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class ClusterCollection(betterproto.Message): - """ - Cluster list collections. Entries are *Cluster* resources or references. - [#not-implemented-hide:] - """ - - entries: "____xds_core_v3__.CollectionEntry" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class Cluster(betterproto.Message): - """Configuration for a single upstream cluster. [#next-free-field: 57]""" - - # Configuration to use different transport sockets for different endpoints. - # The entry of *envoy.transport_socket_match* in the - # :ref:`LbEndpoint.Metadata - # ` is used to - # match against the transport sockets as they appear in the list. The first - # :ref:`match - # ` is used. - # For example, with the following match .. code-block:: yaml - # transport_socket_matches: - name: "enableMTLS" match: acceptMTLS: - # true transport_socket: name: envoy.transport_sockets.tls - # config: { ... } # tls socket configuration - name: "defaultToPlaintext" - # match: {} transport_socket: name: - # envoy.transport_sockets.raw_buffer Connections to the endpoints whose - # metadata value under *envoy.transport_socket_match* having - # "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket - # configuration. If a :ref:`socket match - # ` with - # empty match criteria is provided, that always match any endpoint. For - # example, the "defaultToPlaintext" socket match in case above. If an - # endpoint metadata's value under *envoy.transport_socket_match* does not - # match any *TransportSocketMatch*, socket configuration fallbacks to use the - # *tls_context* or *transport_socket* specified in this cluster. This field - # allows gradual and flexible transport socket configuration changes. The - # metadata of endpoints in EDS can indicate transport socket capabilities. - # For example, an endpoint's metadata can have two key value pairs as - # "acceptMTLS": "true", "acceptPlaintext": "true". While some other - # endpoints, only accepting plaintext traffic has "acceptPlaintext": "true" - # metadata information. Then the xDS server can configure the CDS to a - # client, Envoy A, to send mutual TLS traffic for endpoints with - # "acceptMTLS": "true", by adding a corresponding *TransportSocketMatch* in - # this field. Other client Envoys receive CDS without - # *transport_socket_match* set, and still send plain text traffic to the same - # cluster. This field can be used to specify custom transport socket - # configurations for health checks by adding matching key/value pairs in a - # health check's :ref:`transport socket match criteria ` field. - # [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] - transport_socket_matches: List[ - "ClusterTransportSocketMatch" - ] = betterproto.message_field(43) - # Supplies the name of the cluster which must be unique across all clusters. - # The cluster name is used when emitting :ref:`statistics - # ` if :ref:`alt_stat_name - # ` is not - # provided. Any ``:`` in the cluster name will be converted to ``_`` when - # emitting statistics. 
- name: str = betterproto.string_field(1) - # An optional alternative to the cluster name to be used for observability. - # This name is used emitting stats for the cluster and access logging the - # cluster name. This will appear as additional information in configuration - # dumps of a cluster's current status as :ref:`observability_name - # ` and as an - # additional tag "upstream_cluster.name" while tracing. Note: access logging - # using this field is presently enabled with runtime feature - # `envoy.reloadable_features.use_observable_cluster_name`. Any ``:`` in the - # name will be converted to ``_`` when emitting statistics. This should not - # be confused with :ref:`Router Filter Header `. - alt_stat_name: str = betterproto.string_field(28) - # The :ref:`service discovery type ` - # to use for resolving the cluster. - type: "ClusterDiscoveryType" = betterproto.enum_field( - 2, group="cluster_discovery_type" - ) - # The custom cluster type. - cluster_type: "ClusterCustomClusterType" = betterproto.message_field( - 38, group="cluster_discovery_type" - ) - # Configuration to use for EDS updates for the Cluster. - eds_cluster_config: "ClusterEdsClusterConfig" = betterproto.message_field(3) - # The timeout for new network connections to hosts in the cluster. If not - # set, a default value of 5s will be used. - connect_timeout: timedelta = betterproto.message_field(4) - # Soft limit on size of the cluster’s connections read and write buffers. If - # unspecified, an implementation defined default is applied (1MiB). - per_connection_buffer_limit_bytes: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) - # The :ref:`load balancer type ` to use - # when picking a host in the cluster. - lb_policy: "ClusterLbPolicy" = betterproto.enum_field(6) - # Setting this is required for specifying members of :ref:`STATIC`, :ref:`STRICT - # _DNS` or :ref:`LOGICAL_DNS` clusters. This field supersedes the *hosts* - # field in the v2 API. .. attention:: Setting this allows non-EDS cluster - # types to contain embedded EDS equivalent :ref:`endpoint - # assignments`. - load_assignment: "__endpoint_v3__.ClusterLoadAssignment" = ( - betterproto.message_field(33) - ) - # Optional :ref:`active health checking ` - # configuration for the cluster. If no configuration is specified no health - # checking will be done and all cluster members will be considered healthy at - # all times. - health_checks: List["__core_v3__.HealthCheck"] = betterproto.message_field(8) - # Optional maximum requests for a single upstream connection. This parameter - # is respected by both the HTTP/1.1 and HTTP/2 connection pool - # implementations. If not specified, there is no limit. Setting this - # parameter to 1 will effectively disable keep alive. .. attention:: This - # field has been deprecated in favor of the :ref:`max_requests_per_connection - # ` field. - max_requests_per_connection: Optional[int] = betterproto.message_field( - 9, wraps=betterproto.TYPE_UINT32 - ) - # Optional :ref:`circuit breaking ` for the - # cluster. - circuit_breakers: "CircuitBreakers" = betterproto.message_field(10) - # HTTP protocol options that are applied only to upstream HTTP connections. - # These options apply to all HTTP versions. This has been deprecated in favor - # of :ref:`upstream_http_protocol_options ` in the - # :ref:`http_protocol_options - # ` - # message. upstream_http_protocol_options can be set via the cluster's :ref:` - # extension_protocol_options`. 
See :ref:`upstream_http_protocol_options < - # envoy_v3_api_field_extensions.upstreams.http.v3.HttpProtocolOptions.upstrea - # m_http_protocol_options>` for example usage. - upstream_http_protocol_options: "__core_v3__.UpstreamHttpProtocolOptions" = ( - betterproto.message_field(46) - ) - # Additional options when handling HTTP requests upstream. These options will - # be applicable to both HTTP1 and HTTP2 requests. This has been deprecated in - # favor of :ref:`common_http_protocol_options ` in the - # :ref:`http_protocol_options - # ` - # message. common_http_protocol_options can be set via the cluster's :ref:`ex - # tension_protocol_options`. See :ref:`upstream_http_protocol_options ` for example usage. - common_http_protocol_options: "__core_v3__.HttpProtocolOptions" = ( - betterproto.message_field(29) - ) - # Additional options when handling HTTP1 requests. This has been deprecated - # in favor of http_protocol_options fields in the :ref:`http_protocol_options - # ` - # message. http_protocol_options can be set via the cluster's :ref:`extension - # _protocol_options`. See :ref:`upstream_http_protocol_options ` for example usage. - http_protocol_options: "__core_v3__.Http1ProtocolOptions" = ( - betterproto.message_field(13) - ) - # Even if default HTTP2 protocol options are desired, this field must be set - # so that Envoy will assume that the upstream supports HTTP/2 when making new - # HTTP connection pool connections. Currently, Envoy only supports prior - # knowledge for upstream connections. Even if TLS is used with ALPN, - # `http2_protocol_options` must be specified. As an aside this allows HTTP/2 - # connections to happen over plain text. This has been deprecated in favor of - # http2_protocol_options fields in the :ref:`http_protocol_options - # ` - # message. http2_protocol_options can be set via the cluster's :ref:`extensio - # n_protocol_options`. See :ref:`upstream_http_protocol_options ` for example usage. - http2_protocol_options: "__core_v3__.Http2ProtocolOptions" = ( - betterproto.message_field(14) - ) - # The extension_protocol_options field is used to provide extension-specific - # protocol options for upstream connections. The key should match the - # extension filter name, such as "envoy.filters.network.thrift_proxy". See - # the extension's documentation for details on specific options. [#next- - # major-version: make this a list of typed extensions.] - typed_extension_protocol_options: Dict[ - str, "betterproto_lib_google_protobuf.Any" - ] = betterproto.map_field(36, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # If the DNS refresh rate is specified and the cluster type is either :ref:`S - # TRICT_DNS`, or :ref:`LOGICAL_DNS`, this value is used as the cluster’s - # DNS refresh rate. The value configured must be at least 1ms. If this - # setting is not specified, the value defaults to 5000ms. For cluster types - # other than :ref:`STRICT_DNS` and :ref:`LOGICAL_DNS` this setting is - # ignored. - dns_refresh_rate: timedelta = betterproto.message_field(16) - # If the DNS failure refresh rate is specified and the cluster type is either - # :ref:`STRICT_DNS`, or :ref:`LOGICAL_DNS`, this is used as the cluster’s - # DNS refresh rate when requests are failing. If this setting is not - # specified, the failure refresh rate defaults to the DNS refresh rate. For - # cluster types other than :ref:`STRICT_DNS` and :ref:`LOGICAL_DNS` this - # setting is ignored. 
- dns_failure_refresh_rate: "ClusterRefreshRate" = betterproto.message_field(44) - # Optional configuration for setting cluster's DNS refresh rate. If the value - # is set to true, cluster's DNS refresh rate will be set to resource record's - # TTL which comes from DNS resolution. - respect_dns_ttl: bool = betterproto.bool_field(39) - # The DNS IP address resolution policy. If this setting is not specified, the - # value defaults to :ref:`AUTO`. - dns_lookup_family: "ClusterDnsLookupFamily" = betterproto.enum_field(17) - # If DNS resolvers are specified and the cluster type is either :ref:`STRICT_ - # DNS`, or :ref:`LOGICAL_DNS`, this value is used to specify the cluster’s - # dns resolvers. If this setting is not specified, the value defaults to the - # default resolver, which uses /etc/resolv.conf for configuration. For - # cluster types other than :ref:`STRICT_DNS` and :ref:`LOGICAL_DNS` this - # setting is ignored. This field is deprecated in favor of - # *dns_resolution_config* which aggregates all of the DNS resolver - # configuration in a single message. - dns_resolvers: List["__core_v3__.Address"] = betterproto.message_field(18) - # Always use TCP queries instead of UDP queries for DNS lookups. This field - # is deprecated in favor of *dns_resolution_config* which aggregates all of - # the DNS resolver configuration in a single message. - use_tcp_for_dns_lookups: bool = betterproto.bool_field(45) - # DNS resolution configuration which includes the underlying dns resolver - # addresses and options. This field is deprecated in favor of - # :ref:`typed_dns_resolver_config - # `. - dns_resolution_config: "__core_v3__.DnsResolutionConfig" = ( - betterproto.message_field(53) - ) - # DNS resolver type configuration extension. This extension can be used to - # configure c-ares, apple, or any other DNS resolver types and the related - # parameters. For example, an object of :ref:`CaresDnsResolverConfig ` - # can be packed into this *typed_dns_resolver_config*. This configuration - # replaces the :ref:`dns_resolution_config - # ` - # configuration. During the transition period when both - # *dns_resolution_config* and *typed_dns_resolver_config* exists, when - # *typed_dns_resolver_config* is in place, Envoy will use it and ignore - # *dns_resolution_config*. When *typed_dns_resolver_config* is missing, the - # default behavior is in place. [#extension-category: - # envoy.network.dns_resolver] - typed_dns_resolver_config: "__core_v3__.TypedExtensionConfig" = ( - betterproto.message_field(55) - ) - # Optional configuration for having cluster readiness block on warm-up. - # Currently, only applicable for :ref:`STRICT_DNS`, or :ref:`LOGICAL_DNS`. - # If true, cluster readiness blocks on warm-up. If false, the cluster will - # complete initialization whether or not warm-up has completed. Defaults to - # true. - wait_for_warm_on_init: Optional[bool] = betterproto.message_field( - 54, wraps=betterproto.TYPE_BOOL - ) - # If specified, outlier detection will be enabled for this upstream cluster. - # Each of the configuration values can be overridden via :ref:`runtime values - # `. - outlier_detection: "OutlierDetection" = betterproto.message_field(19) - # The interval for removing stale hosts from a cluster type :ref:`ORIGINAL_DS - # T`. Hosts are considered stale if they have not been used as upstream - # destinations during this interval. 
New hosts are added to original - # destination clusters on demand as new connections are redirected to Envoy, - # causing the number of hosts in the cluster to grow over time. Hosts that - # are not stale (they are actively used as destinations) are kept in the - # cluster, which allows connections to them remain open, saving the latency - # that would otherwise be spent on opening new connections. If this setting - # is not specified, the value defaults to 5000ms. For cluster types other - # than :ref:`ORIGINAL_DST` this setting is ignored. - cleanup_interval: timedelta = betterproto.message_field(20) - # Optional configuration used to bind newly established upstream connections. - # This overrides any bind_config specified in the bootstrap proto. If the - # address and port are empty, no bind will be performed. - upstream_bind_config: "__core_v3__.BindConfig" = betterproto.message_field(21) - # Configuration for load balancing subsetting. - lb_subset_config: "ClusterLbSubsetConfig" = betterproto.message_field(22) - # Optional configuration for the Ring Hash load balancing policy. - ring_hash_lb_config: "ClusterRingHashLbConfig" = betterproto.message_field( - 23, group="lb_config" - ) - # Optional configuration for the Maglev load balancing policy. - maglev_lb_config: "ClusterMaglevLbConfig" = betterproto.message_field( - 52, group="lb_config" - ) - # Optional configuration for the Original Destination load balancing policy. - original_dst_lb_config: "ClusterOriginalDstLbConfig" = betterproto.message_field( - 34, group="lb_config" - ) - # Optional configuration for the LeastRequest load balancing policy. - least_request_lb_config: "ClusterLeastRequestLbConfig" = betterproto.message_field( - 37, group="lb_config" - ) - # Optional configuration for the RoundRobin load balancing policy. - round_robin_lb_config: "ClusterRoundRobinLbConfig" = betterproto.message_field( - 56, group="lb_config" - ) - # Common configuration for all load balancer implementations. - common_lb_config: "ClusterCommonLbConfig" = betterproto.message_field(27) - # Optional custom transport socket implementation to use for upstream - # connections. To setup TLS, set a transport socket with name - # `envoy.transport_sockets.tls` and :ref:`UpstreamTlsContexts - # ` - # in the `typed_config`. If no transport socket configuration is specified, - # new connections will be set up with plaintext. - transport_socket: "__core_v3__.TransportSocket" = betterproto.message_field(24) - # The Metadata field can be used to provide additional information about the - # cluster. It can be used for stats, logging, and varying filter behavior. - # Fields should use reverse DNS notation to denote which entity within Envoy - # will need the information. For instance, if the metadata is intended for - # the Router filter, the filter name should be specified as - # *envoy.filters.http.router*. - metadata: "__core_v3__.Metadata" = betterproto.message_field(25) - # Determines how Envoy selects the protocol used to speak to upstream hosts. - # This has been deprecated in favor of setting explicit protocol selection in - # the :ref:`http_protocol_options - # ` - # message. http_protocol_options can be set via the cluster's :ref:`extension - # _protocol_options`. - protocol_selection: "ClusterClusterProtocolSelection" = betterproto.enum_field(26) - # Optional options for upstream connections. 
- upstream_connection_options: "UpstreamConnectionOptions" = ( - betterproto.message_field(30) - ) - # If an upstream host becomes unhealthy (as determined by the configured - # health checks or outlier detection), immediately close all connections to - # the failed host. .. note:: This is currently only supported for - # connections created by tcp_proxy. .. note:: The current implementation of - # this feature closes all connections immediately when the unhealthy status - # is detected. If there are a large number of connections open to an - # upstream host that becomes unhealthy, Envoy may spend a substantial amount - # of time exclusively closing these connections, and not processing any - # other traffic. - close_connections_on_host_health_failure: bool = betterproto.bool_field(31) - # If set to true, Envoy will ignore the health value of a host when - # processing its removal from service discovery. This means that if active - # health checking is used, Envoy will *not* wait for the endpoint to go - # unhealthy before removing it. - ignore_health_on_host_removal: bool = betterproto.bool_field(32) - # An (optional) network filter chain, listed in the order the filters should - # be applied. The chain will be applied to all outgoing connections that - # Envoy makes to the upstream servers of this cluster. - filters: List["Filter"] = betterproto.message_field(40) - # If this field is set and is supported by the client, it will supersede the - # value of - # :ref:`lb_policy`. - load_balancing_policy: "LoadBalancingPolicy" = betterproto.message_field(41) - # [#not-implemented-hide:] If present, tells the client where to send load - # reports via LRS. If not present, the client will fall back to a client-side - # default, which may be either (a) don't send any load reports or (b) send - # load reports for all clusters to a single default server (which may be - # configured in the bootstrap file). Note that if multiple clusters point to - # the same LRS server, the client may choose to create a separate stream for - # each cluster or it may choose to coalesce the data for multiple clusters - # onto a single stream. Either way, the client must make sure to send the - # data for any given cluster on no more than one stream. [#next-major- - # version: In the v3 API, we should consider restructuring this somehow, - # maybe by allowing LRS to go on the ADS stream, or maybe by moving some of - # the negotiation from the LRS stream here.] - lrs_server: "__core_v3__.ConfigSource" = betterproto.message_field(42) - # If track_timeout_budgets is true, the :ref:`timeout budget histograms - # ` will be published - # for each request. These show what percentage of a request's per try and - # global timeout was used. A value of 0 would indicate that none of the - # timeout was used or that the timeout was infinite. A value of 100 would - # indicate that the request took the entirety of the timeout given to it. .. - # attention:: This field has been deprecated in favor of `timeout_budgets`, - # part of :ref:`track_cluster_stats - # `. - track_timeout_budgets: bool = betterproto.bool_field(47) - # Optional customization and configuration of upstream connection pool, and - # upstream type. Currently this field only applies for HTTP traffic but is - # designed for eventual use for custom TCP upstreams. 
For HTTP traffic, Envoy - # will generally take downstream HTTP and send it upstream as upstream HTTP, - # using the http connection pool and the codec from `http2_protocol_options` - # For routes where CONNECT termination is configured, Envoy will take - # downstream CONNECT requests and forward the CONNECT payload upstream over - # raw TCP using the tcp connection pool. The default pool used is the generic - # connection pool which creates the HTTP upstream for most HTTP requests, and - # the TCP upstream if CONNECT termination is configured. If users desire - # custom connection pool or upstream behavior, for example terminating - # CONNECT only if a custom filter indicates it is appropriate, the custom - # factories can be registered and configured here. [#extension-category: - # envoy.upstreams] - upstream_config: "__core_v3__.TypedExtensionConfig" = betterproto.message_field(48) - # Configuration to track optional cluster stats. - track_cluster_stats: "TrackClusterStats" = betterproto.message_field(49) - # Preconnect configuration for this cluster. - preconnect_policy: "ClusterPreconnectPolicy" = betterproto.message_field(50) - # If `connection_pool_per_downstream_connection` is true, the cluster will - # use a separate connection pool for every downstream connection - connection_pool_per_downstream_connection: bool = betterproto.bool_field(51) - - def __post_init__(self) -> None: - super().__post_init__() - if self.max_requests_per_connection: - warnings.warn( - "Cluster.max_requests_per_connection is deprecated", DeprecationWarning - ) - if self.upstream_http_protocol_options: - warnings.warn( - "Cluster.upstream_http_protocol_options is deprecated", - DeprecationWarning, - ) - if self.common_http_protocol_options: - warnings.warn( - "Cluster.common_http_protocol_options is deprecated", DeprecationWarning - ) - if self.http_protocol_options: - warnings.warn( - "Cluster.http_protocol_options is deprecated", DeprecationWarning - ) - if self.http2_protocol_options: - warnings.warn( - "Cluster.http2_protocol_options is deprecated", DeprecationWarning - ) - if self.dns_resolvers: - warnings.warn("Cluster.dns_resolvers is deprecated", DeprecationWarning) - if self.use_tcp_for_dns_lookups: - warnings.warn( - "Cluster.use_tcp_for_dns_lookups is deprecated", DeprecationWarning - ) - if self.dns_resolution_config: - warnings.warn( - "Cluster.dns_resolution_config is deprecated", DeprecationWarning - ) - if self.protocol_selection: - warnings.warn( - "Cluster.protocol_selection is deprecated", DeprecationWarning - ) - if self.track_timeout_budgets: - warnings.warn( - "Cluster.track_timeout_budgets is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class ClusterTransportSocketMatch(betterproto.Message): - """ - TransportSocketMatch specifies what transport socket config will be used - when the match conditions are satisfied. - """ - - # The name of the match, used in stats generation. - name: str = betterproto.string_field(1) - # Optional endpoint metadata match criteria. The connection to the endpoint - # with metadata matching what is set in this field will use the transport - # socket configuration specified here. The endpoint's metadata entry in - # *envoy.transport_socket_match* is used to match against the values - # specified in this field. - match: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(2) - # The configuration of the transport socket. 
[#extension-category: - # envoy.transport_sockets.upstream] - transport_socket: "__core_v3__.TransportSocket" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ClusterCustomClusterType(betterproto.Message): - """Extended cluster type.""" - - # The type of the cluster to instantiate. The name must match a supported - # cluster type. - name: str = betterproto.string_field(1) - # Cluster specific configuration which depends on the cluster being - # instantiated. See the supported cluster for further documentation. - # [#extension-category: envoy.clusters] - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ClusterEdsClusterConfig(betterproto.Message): - """Only valid when discovery type is EDS.""" - - # Configuration for the source of EDS updates for this Cluster. - eds_config: "__core_v3__.ConfigSource" = betterproto.message_field(1) - # Optional alternative to cluster name to present to EDS. This does not have - # the same restrictions as cluster name, i.e. it may be arbitrary length. - # This may be a xdstp:// URL. - service_name: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class ClusterLbSubsetConfig(betterproto.Message): - """ - Optionally divide the endpoints in this cluster into subsets defined by - endpoint metadata and selected by route and weighted cluster metadata. - [#next-free-field: 8] - """ - - # The behavior used when no endpoint subset matches the selected route's - # metadata. The value defaults to :ref:`NO_FALLBACK - # `. - fallback_policy: "ClusterLbSubsetConfigLbSubsetFallbackPolicy" = ( - betterproto.enum_field(1) - ) - # Specifies the default subset of endpoints used during fallback if - # fallback_policy is :ref:`DEFAULT_SUBSET`. Each - # field in default_subset is compared to the matching LbEndpoint.Metadata - # under the *envoy.lb* namespace. It is valid for no hosts to match, in which - # case the behavior is the same as a fallback_policy of :ref:`NO_FALLBACK`. - default_subset: "betterproto_lib_google_protobuf.Struct" = ( - betterproto.message_field(2) - ) - # For each entry, LbEndpoint.Metadata's *envoy.lb* namespace is traversed and - # a subset is created for each unique combination of key and value. For - # example: .. code-block:: json { "subset_selectors": [ { "keys": [ - # "version" ] }, { "keys": [ "stage", "hardware_type" ] } ]} A subset - # is matched when the metadata from the selected route and weighted cluster - # contains the same keys and values as the subset's metadata. The same host - # may appear in multiple subsets. - subset_selectors: List[ - "ClusterLbSubsetConfigLbSubsetSelector" - ] = betterproto.message_field(3) - # If true, routing to subsets will take into account the localities and - # locality weights of the endpoints when making the routing decision. There - # are some potential pitfalls associated with enabling this feature, as the - # resulting traffic split after applying both a subset match and locality - # weights might be undesirable. Consider for example a situation in which you - # have 50/50 split across two localities X/Y which have 100 hosts each - # without subsetting. If the subset LB results in X having only 1 host - # selected but Y having 100, then a lot more load is being dumped on the - # single host in X than originally anticipated in the load balancing - # assignment delivered via EDS. 
- locality_weight_aware: bool = betterproto.bool_field(4) - # When used with locality_weight_aware, scales the weight of each locality by - # the ratio of hosts in the subset vs hosts in the original subset. This aims - # to even out the load going to an individual locality if said locality is - # disproportionately affected by the subset predicate. - scale_locality_weight: bool = betterproto.bool_field(5) - # If true, when a fallback policy is configured and its corresponding subset - # fails to find a host this will cause any host to be selected instead. This - # is useful when using the default subset as the fallback policy, given the - # default subset might become empty. With this option enabled, if that - # happens the LB will attempt to select a host from the entire cluster. - panic_mode_any: bool = betterproto.bool_field(6) - # If true, metadata specified for a metadata key will be matched against the - # corresponding endpoint metadata if the endpoint metadata matches the value - # exactly OR it is a list value and any of the elements in the list matches - # the criteria. - list_as_any: bool = betterproto.bool_field(7) - - -@dataclass(eq=False, repr=False) -class ClusterLbSubsetConfigLbSubsetSelector(betterproto.Message): - """Specifications for subsets.""" - - # List of keys to match with the weighted cluster metadata. - keys: List[str] = betterproto.string_field(1) - # Selects a mode of operation in which each subset has only one host. This - # mode uses the same rules for choosing a host, but updating hosts is faster, - # especially for large numbers of hosts. If a match is found to a host, that - # host will be used regardless of priority levels, unless the host is - # unhealthy. Currently, this mode is only supported if `subset_selectors` has - # only one entry, and `keys` contains only one entry. When this mode is - # enabled, configurations that contain more than one host with the same - # metadata value for the single key in `keys` will use only one of the hosts - # with the given key; no requests will be routed to the others. The cluster - # gauge :ref:`lb_subsets_single_host_per_subset_duplicate` indicates how many duplicates are present in - # the current configuration. - single_host_per_subset: bool = betterproto.bool_field(4) - # The behavior used when no endpoint subset matches the selected route's - # metadata. - fallback_policy: "ClusterLbSubsetConfigLbSubsetSelectorLbSubsetSelectorFallbackPolicy" = betterproto.enum_field( - 2 - ) - # Subset of :ref:`keys` used by :ref:`KEYS_SUBSET` fallback policy. It has to be a non empty - # list if KEYS_SUBSET fallback policy is selected. For any other fallback - # policy the parameter is not used and should not be set. Only values also - # present in :ref:`keys` are allowed, but `fallback_keys_subset` - # cannot be equal to `keys`. - fallback_keys_subset: List[str] = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class ClusterSlowStartConfig(betterproto.Message): - """ - Configuration for :ref:`slow start mode - `. - """ - - # Represents the size of slow start window. If set, the newly created host - # remains in slow start mode starting from its creation time for the duration - # of slow start window. - slow_start_window: timedelta = betterproto.message_field(1) - # This parameter controls the speed of traffic increase over the slow start - # window. Defaults to 1.0, so that endpoint would get linearly increasing - # amount of traffic. 
When increasing the value for this parameter, the speed - # of traffic ramp-up increases non-linearly. The value of aggression - # parameter should be greater than 0.0. By tuning the parameter, is possible - # to achieve polynomial or exponential shape of ramp-up curve. During slow - # start window, effective weight of an endpoint would be scaled with time - # factor and aggression: `new_weight = weight * time_factor ^ (1 / - # aggression)`, where `time_factor=(time_since_start_seconds / - # slow_start_time_seconds)`. As time progresses, more and more traffic would - # be sent to endpoint, which is in slow start window. Once host exits slow - # start, time_factor and aggression no longer affect its weight. - aggression: "__core_v3__.RuntimeDouble" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ClusterRoundRobinLbConfig(betterproto.Message): - """Specific configuration for the RoundRobin load balancing policy.""" - - # Configuration for slow start mode. If this configuration is not set, slow - # start will not be not enabled. - slow_start_config: "ClusterSlowStartConfig" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ClusterLeastRequestLbConfig(betterproto.Message): - """Specific configuration for the LeastRequest load balancing policy.""" - - # The number of random healthy hosts from which the host with the fewest - # active requests will be chosen. Defaults to 2 so that we perform two-choice - # selection if the field is not set. - choice_count: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # The following formula is used to calculate the dynamic weights when hosts - # have different load balancing weights: `weight = load_balancing_weight / - # (active_requests + 1)^active_request_bias` The larger the active request - # bias is, the more aggressively active requests will lower the effective - # weight when all host weights are not equal. `active_request_bias` must be - # greater than or equal to 0.0. When `active_request_bias == 0.0` the Least - # Request Load Balancer doesn't consider the number of active requests at the - # time it picks a host and behaves like the Round Robin Load Balancer. When - # `active_request_bias > 0.0` the Least Request Load Balancer scales the load - # balancing weight by the number of active requests at the time it does a - # pick. The value is cached for performance reasons and refreshed whenever - # one of the Load Balancer's host sets changes, e.g., whenever there is a - # host membership update or a host load balancing weight change. .. note:: - # This setting only takes effect if all host weights are not equal. - active_request_bias: "__core_v3__.RuntimeDouble" = betterproto.message_field(2) - # Configuration for slow start mode. If this configuration is not set, slow - # start will not be not enabled. - slow_start_config: "ClusterSlowStartConfig" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ClusterRingHashLbConfig(betterproto.Message): - """ - Specific configuration for the - :ref:`RingHash` load - balancing policy. - """ - - # Minimum hash ring size. The larger the ring is (that is, the more hashes - # there are for each provided host) the better the request distribution will - # reflect the desired weights. Defaults to 1024 entries, and limited to 8M - # entries. See also :ref:`maximum_ring_size`. 
- minimum_ring_size: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT64 - ) - # The hash function used to hash hosts onto the ketama ring. The value - # defaults to :ref:`XX_HASH`. - hash_function: "ClusterRingHashLbConfigHashFunction" = betterproto.enum_field(3) - # Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, - # but can be lowered to further constrain resource use. See also :ref:`minimu - # m_ring_size`. - maximum_ring_size: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT64 - ) - - -@dataclass(eq=False, repr=False) -class ClusterMaglevLbConfig(betterproto.Message): - """ - Specific configuration for the - :ref:`Maglev` load balancing - policy. - """ - - # The table size for Maglev hashing. The Maglev aims for ‘minimal disruption’ - # rather than an absolute guarantee. Minimal disruption means that when the - # set of upstreams changes, a connection will likely be sent to the same - # upstream as it was before. Increasing the table size reduces the amount of - # disruption. The table size must be prime number limited to 5000011. If it - # is not specified, the default is 65537. - table_size: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT64 - ) - - -@dataclass(eq=False, repr=False) -class ClusterOriginalDstLbConfig(betterproto.Message): - """ - Specific configuration for the :ref:`Original Destination - ` load balancing - policy. - """ - - # When true, :ref:`x-envoy-original-dst-host ` can be used to override destination address. .. - # attention:: This header isn't sanitized by default, so enabling this - # feature allows HTTP clients to route traffic to arbitrary hosts and/or - # ports, which may have serious security consequences. .. note:: If the - # header appears multiple times only the first value is used. - use_http_header: bool = betterproto.bool_field(1) - - -@dataclass(eq=False, repr=False) -class ClusterCommonLbConfig(betterproto.Message): - """ - Common configuration for all load balancer implementations. [#next-free- - field: 8] - """ - - # Configures the :ref:`healthy panic threshold - # `. If not specified, the - # default is 50%. To disable panic mode, set to 0%. .. note:: The specified - # percent will be truncated to the nearest 1%. - healthy_panic_threshold: "___type_v3__.Percent" = betterproto.message_field(1) - zone_aware_lb_config: "ClusterCommonLbConfigZoneAwareLbConfig" = ( - betterproto.message_field(2, group="locality_config_specifier") - ) - locality_weighted_lb_config: "ClusterCommonLbConfigLocalityWeightedLbConfig" = ( - betterproto.message_field(3, group="locality_config_specifier") - ) - # If set, all health check/weight/metadata updates that happen within this - # duration will be merged and delivered in one shot when the duration - # expires. The start of the duration is when the first update happens. This - # is useful for big clusters, with potentially noisy deploys that might - # trigger excessive CPU usage due to a constant stream of healthcheck state - # changes or metadata updates. The first set of updates to be seen apply - # immediately (e.g.: a new cluster). Please always keep in mind that the use - # of sandbox technologies may change this behavior. If this is not set, we - # default to a merge window of 1000ms. To disable it, set the merge window to - # 0. Note: merging does not apply to cluster membership changes (e.g.: - # adds/removes); this is because merging those updates isn't currently safe. 
- # See https://github.com/envoyproxy/envoy/pull/3941. - update_merge_window: timedelta = betterproto.message_field(4) - # If set to true, Envoy will :ref:`exclude - # ` new hosts when computing load - # balancing weights until they have been health checked for the first time. - # This will have no effect unless active health checking is also configured. - ignore_new_hosts_until_first_hc: bool = betterproto.bool_field(5) - # If set to `true`, the cluster manager will drain all existing connections - # to upstream hosts whenever hosts are added or removed from the cluster. - close_connections_on_host_set_change: bool = betterproto.bool_field(6) - # Common Configuration for all consistent hashing load balancers (MaglevLb, - # RingHashLb, etc.) - consistent_hashing_lb_config: "ClusterCommonLbConfigConsistentHashingLbConfig" = ( - betterproto.message_field(7) - ) - - -@dataclass(eq=False, repr=False) -class ClusterCommonLbConfigZoneAwareLbConfig(betterproto.Message): - """ - Configuration for :ref:`zone aware routing - `. - """ - - # Configures percentage of requests that will be considered for zone aware - # routing if zone aware routing is configured. If not specified, the default - # is 100%. * :ref:`runtime values - # `. * :ref:`Zone aware - # routing support `. - routing_enabled: "___type_v3__.Percent" = betterproto.message_field(1) - # Configures minimum upstream cluster size required for zone aware routing If - # upstream cluster size is less than specified, zone aware routing is not - # performed even if zone aware routing is configured. If not specified, the - # default is 6. * :ref:`runtime values - # `. * :ref:`Zone aware - # routing support `. - min_cluster_size: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT64 - ) - # If set to true, Envoy will not consider any hosts when the cluster is in - # :ref:`panic mode`. Instead, - # the cluster will fail all requests as if all hosts are unhealthy. This can - # help avoid potentially overwhelming a failing service. - fail_traffic_on_panic: bool = betterproto.bool_field(3) - - -@dataclass(eq=False, repr=False) -class ClusterCommonLbConfigLocalityWeightedLbConfig(betterproto.Message): - """ - Configuration for :ref:`locality weighted load balancing - ` - """ - - pass - - -@dataclass(eq=False, repr=False) -class ClusterCommonLbConfigConsistentHashingLbConfig(betterproto.Message): - """ - Common Configuration for all consistent hashing load balancers (MaglevLb, - RingHashLb, etc.) - """ - - # If set to `true`, the cluster will use hostname instead of the resolved - # address as the key to consistently hash to an upstream host. Only valid for - # StrictDNS clusters with hostnames which resolve to a single IP address. - use_hostname_for_hashing: bool = betterproto.bool_field(1) - # Configures percentage of average cluster load to bound per upstream host. - # For example, with a value of 150 no upstream host will get a load more than - # 1.5 times the average load of all the hosts in the cluster. If not - # specified, the load is not bounded for any upstream host. Typical value for - # this parameter is between 120 and 200. Minimum is 100. Applies to both Ring - # Hash and Maglev load balancers. This is implemented based on the method - # described in the paper https://arxiv.org/abs/1608.01350. For the specified - # `hash_balance_factor`, requests to any upstream host are capped at - # `hash_balance_factor/100` times the average number of requests across the - # cluster. 
When a request arrives for an upstream host that is currently - # serving at its max capacity, linear probing is used to identify an eligible - # host. Further, the linear probe is implemented using a random jump in hosts - # ring/table to identify the eligible host (this technique is as described in - # the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the - # cascading overflow effect when choosing the next host in the ring/table). - # If weights are specified on the hosts, they are respected. This is an O(N) - # algorithm, unlike other load balancers. Using a lower `hash_balance_factor` - # results in more hosts being probed, so use a higher value if you require - # better performance. - hash_balance_factor: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class ClusterRefreshRate(betterproto.Message): - # Specifies the base interval between refreshes. This parameter is required - # and must be greater than zero and less than :ref:`max_interval - # `. - base_interval: timedelta = betterproto.message_field(1) - # Specifies the maximum interval between refreshes. This parameter is - # optional, but must be greater than or equal to the :ref:`base_interval - # ` - # if set. The default is 10 times the :ref:`base_interval - # `. - max_interval: timedelta = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ClusterPreconnectPolicy(betterproto.Message): - # Indicates how many streams (rounded up) can be anticipated per-upstream for - # each incoming stream. This is useful for high-QPS or latency-sensitive - # services. Preconnecting will only be done if the upstream is healthy and - # the cluster has traffic. For example if this is 2, for an incoming HTTP/1.1 - # stream, 2 connections will be established, one for the new incoming stream, - # and one for a presumed follow-up stream. For HTTP/2, only one connection - # would be established by default as one connection can serve both the - # original and presumed follow-up stream. In steady state for non-multiplexed - # connections a value of 1.5 would mean if there were 100 active streams, - # there would be 100 connections in use, and 50 connections preconnected. - # This might be a useful value for something like short lived single-use - # connections, for example proxying HTTP/1.1 if keep-alive were false and - # each stream resulted in connection termination. It would likely be overkill - # for long lived connections, such as TCP proxying SMTP or regular HTTP/1.1 - # with keep-alive. For long lived traffic, a value of 1.05 would be more - # reasonable, where for every 100 connections, 5 preconnected connections - # would be in the queue in case of unexpected disconnects where the - # connection could not be reused. If this value is not set, or set explicitly - # to one, Envoy will fetch as many connections as needed to serve streams in - # flight. This means in steady state if a connection is torn down, a - # subsequent streams will pay an upstream-rtt latency penalty waiting for a - # new connection. This is limited somewhat arbitrarily to 3 because - # preconnecting too aggressively can harm latency more than the preconnecting - # helps. - per_upstream_preconnect_ratio: Optional[float] = betterproto.message_field( - 1, wraps=betterproto.TYPE_DOUBLE - ) - # Indicates how many many streams (rounded up) can be anticipated across a - # cluster for each stream, useful for low QPS services. 
This is currently - # supported for a subset of deterministic non-hash-based load-balancing - # algorithms (weighted round robin, random). Unlike - # *per_upstream_preconnect_ratio* this preconnects across the upstream - # instances in a cluster, doing best effort predictions of what upstream - # would be picked next and pre-establishing a connection. Preconnecting will - # be limited to one preconnect per configured upstream in the cluster and - # will only be done if there are healthy upstreams and the cluster has - # traffic. For example if preconnecting is set to 2 for a round robin HTTP/2 - # cluster, on the first incoming stream, 2 connections will be preconnected - - # one to the first upstream for this cluster, one to the second on the - # assumption there will be a follow-up stream. If this value is not set, or - # set explicitly to one, Envoy will fetch as many connections as needed to - # serve streams in flight, so during warm up and in steady state if a - # connection is closed (and per_upstream_preconnect_ratio is not set), there - # will be a latency hit for connection establishment. If both this and - # preconnect_ratio are set, Envoy will make sure both predicted needs are - # met, basically preconnecting max(predictive-preconnect, per-upstream- - # preconnect), for each upstream. - predictive_preconnect_ratio: Optional[float] = betterproto.message_field( - 2, wraps=betterproto.TYPE_DOUBLE - ) - - -@dataclass(eq=False, repr=False) -class LoadBalancingPolicy(betterproto.Message): - """ - Extensible load balancing policy configuration. Every LB policy defined via - this mechanism will be identified via a unique name using reverse DNS - notation. If the policy needs configuration parameters, it must define a - message for its own configuration, which will be stored in the config - field. The name of the policy will tell clients which type of message they - should expect to see in the config field. Note that there are cases where - it is useful to be able to independently select LB policies for choosing a - locality and for choosing an endpoint within that locality. For example, a - given deployment may always use the same policy to choose the locality, but - for choosing the endpoint within the locality, some clusters may use - weighted-round-robin, while others may use some sort of session-based - balancing. This can be accomplished via hierarchical LB policies, where the - parent LB policy creates a child LB policy for each locality. For each - request, the parent chooses the locality and then delegates to the child - policy for that locality to choose the endpoint within the locality. To - facilitate this, the config message for the top-level LB policy may include - a field of type LoadBalancingPolicy that specifies the child policy. - """ - - # Each client will iterate over the list in order and stop at the first - # policy that it supports. This provides a mechanism for starting to use new - # LB policies that are not yet supported by all clients. - policies: List["LoadBalancingPolicyPolicy"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class LoadBalancingPolicyPolicy(betterproto.Message): - typed_extension_config: "__core_v3__.TypedExtensionConfig" = ( - betterproto.message_field(4) - ) - - -@dataclass(eq=False, repr=False) -class UpstreamBindConfig(betterproto.Message): - """ - An extensible structure containing the address Envoy should bind to when - establishing upstream connections. 
- """ - - # The address Envoy should bind to when establishing upstream connections. - source_address: "__core_v3__.Address" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class UpstreamConnectionOptions(betterproto.Message): - # If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. - tcp_keepalive: "__core_v3__.TcpKeepalive" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class TrackClusterStats(betterproto.Message): - # If timeout_budgets is true, the :ref:`timeout budget histograms - # ` will be published - # for each request. These show what percentage of a request's per try and - # global timeout was used. A value of 0 would indicate that none of the - # timeout was used or that the timeout was infinite. A value of 100 would - # indicate that the request took the entirety of the timeout given to it. - timeout_budgets: bool = betterproto.bool_field(1) - # If request_response_sizes is true, then the :ref:`histograms - # ` tracking - # header and body sizes of requests and responses will be published. - request_response_sizes: bool = betterproto.bool_field(2) - - -from .....xds.core import v3 as ____xds_core_v3__ -from ....type import v3 as ___type_v3__ -from ...core import v3 as __core_v3__ -from ...endpoint import v3 as __endpoint_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/common/__init__.py b/src/envoy_data_plane/envoy/config/common/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/common/dynamic_forward_proxy/__init__.py b/src/envoy_data_plane/envoy/config/common/dynamic_forward_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/common/dynamic_forward_proxy/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/common/dynamic_forward_proxy/v2alpha/__init__.py deleted file mode 100644 index ecd5c0c..0000000 --- a/src/envoy_data_plane/envoy/config/common/dynamic_forward_proxy/v2alpha/__init__.py +++ /dev/null @@ -1,66 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class DnsCacheConfig(betterproto.Message): - """ - Configuration for the dynamic forward proxy DNS cache. See the - :ref:`architecture overview ` for - more information. [#next-free-field: 7] - """ - - # The name of the cache. Multiple named caches allow independent dynamic - # forward proxy configurations to operate within a single Envoy process using - # different configurations. All configurations with the same name *must* - # otherwise have the same settings when referenced from different - # configuration components. Configuration will fail to load if this is not - # the case. - name: str = betterproto.string_field(1) - # The DNS lookup family to use during resolution. - # [#comment:TODO(mattklein123): Figure out how to support IPv4/IPv6 "happy - # eyeballs" mode. The way this might work is a new lookup family which - # returns both IPv4 and IPv6 addresses, and then configures a host to have a - # primary and fall back address. 
With this, we could very likely build a - # "happy eyeballs" connection pool which would race the primary / fall back - # address and return the one that wins. This same method could potentially - # also be used for QUIC to TCP fall back.] - dns_lookup_family: "____api_v2__.ClusterDnsLookupFamily" = betterproto.enum_field(2) - # The DNS refresh rate for currently cached DNS hosts. If not specified - # defaults to 60s. .. note: The returned DNS TTL is not currently used to - # alter the refresh rate. This feature will be added in a future change. .. - # note: The refresh rate is rounded to the closest millisecond, and must be - # at least 1ms. - dns_refresh_rate: timedelta = betterproto.message_field(3) - # The TTL for hosts that are unused. Hosts that have not been used in the - # configured time interval will be purged. If not specified defaults to 5m. - # .. note: The TTL is only checked at the time of DNS refresh, as specified - # by *dns_refresh_rate*. This means that if the configured TTL is shorter - # than the refresh rate the host may not be removed immediately. .. note: - # The TTL has no relation to DNS TTL and is only used to control Envoy's - # resource usage. - host_ttl: timedelta = betterproto.message_field(4) - # The maximum number of hosts that the cache will hold. If not specified - # defaults to 1024. .. note: The implementation is approximate and enforced - # independently on each worker thread, thus it is possible for the maximum - # hosts in the cache to go slightly above the configured value depending on - # timing. This is similar to how other circuit breakers work. - max_hosts: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) - # If the DNS failure refresh rate is specified, this is used as the cache's - # DNS refresh rate when DNS requests are failing. If this setting is not - # specified, the failure refresh rate defaults to the dns_refresh_rate. - dns_failure_refresh_rate: "____api_v2__.ClusterRefreshRate" = ( - betterproto.message_field(6) - ) - - -from .....api import v2 as ____api_v2__ diff --git a/src/envoy_data_plane/envoy/config/common/key_value/__init__.py b/src/envoy_data_plane/envoy/config/common/key_value/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/common/key_value/v3/__init__.py b/src/envoy_data_plane/envoy/config/common/key_value/v3/__init__.py deleted file mode 100644 index ba0a548..0000000 --- a/src/envoy_data_plane/envoy/config/common/key_value/v3/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/config/common/key_value/v3/config.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class KeyValueStoreConfig(betterproto.Message): - """This shared configuration for Envoy key value stores.""" - - # [#extension-category: envoy.common.key_value] - config: "___core_v3__.TypedExtensionConfig" = betterproto.message_field(1) - - -from ....core import v3 as ___core_v3__ diff --git a/src/envoy_data_plane/envoy/config/common/matcher/__init__.py b/src/envoy_data_plane/envoy/config/common/matcher/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/common/matcher/v3/__init__.py b/src/envoy_data_plane/envoy/config/common/matcher/v3/__init__.py deleted file mode 100644 index 10f3235..0000000 --- a/src/envoy_data_plane/envoy/config/common/matcher/v3/__init__.py +++ /dev/null @@ -1,242 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/common/matcher/v3/matcher.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Dict, List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Matcher(betterproto.Message): - """ - A matcher, which may traverse a matching tree in order to result in a match - action. During matching, the tree will be traversed until a match is found, - or if no match is found the action specified by the most specific - on_no_match will be evaluated. As an on_no_match might result in another - matching tree being evaluated, this process might repeat several times - until the final OnMatch (or no match) is decided. - """ - - # A linear list of matchers to evaluate. - matcher_list: "MatcherMatcherList" = betterproto.message_field( - 1, group="matcher_type" - ) - # A match tree to evaluate. - matcher_tree: "MatcherMatcherTree" = betterproto.message_field( - 2, group="matcher_type" - ) - # Optional OnMatch to use if the matcher failed. If specified, the OnMatch is - # used, and the matcher is considered to have matched. If not specified, the - # matcher is considered not to have matched. - on_no_match: "MatcherOnMatch" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class MatcherOnMatch(betterproto.Message): - """What to do if a match is successful.""" - - # Nested matcher to evaluate. If the nested matcher does not match and does - # not specify on_no_match, then this matcher is considered not to have - # matched, even if a predicate at this level or above returned true. - matcher: "Matcher" = betterproto.message_field(1, group="on_match") - # Protocol-specific action to take. - action: "___core_v3__.TypedExtensionConfig" = betterproto.message_field( - 2, group="on_match" - ) - - -@dataclass(eq=False, repr=False) -class MatcherMatcherList(betterproto.Message): - """ - A linear list of field matchers. The field matchers are evaluated in order, - and the first match wins. - """ - - # A list of matchers. First match wins. - matchers: List["MatcherMatcherListFieldMatcher"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class MatcherMatcherListPredicate(betterproto.Message): - """Predicate to determine if a match is successful.""" - - # A single predicate to evaluate. 
- single_predicate: "MatcherMatcherListPredicateSinglePredicate" = ( - betterproto.message_field(1, group="match_type") - ) - # A list of predicates to be OR-ed together. - or_matcher: "MatcherMatcherListPredicatePredicateList" = betterproto.message_field( - 2, group="match_type" - ) - # A list of predicates to be AND-ed together. - and_matcher: "MatcherMatcherListPredicatePredicateList" = betterproto.message_field( - 3, group="match_type" - ) - # The invert of a predicate - not_matcher: "MatcherMatcherListPredicate" = betterproto.message_field( - 4, group="match_type" - ) - - -@dataclass(eq=False, repr=False) -class MatcherMatcherListPredicateSinglePredicate(betterproto.Message): - """Predicate for a single input field.""" - - # Protocol-specific specification of input field to match on. [#extension- - # category: envoy.matching.common_inputs] - input: "___core_v3__.TypedExtensionConfig" = betterproto.message_field(1) - # Built-in string matcher. - value_match: "____type_matcher_v3__.StringMatcher" = betterproto.message_field( - 2, group="matcher" - ) - # Extension for custom matching logic. [#extension-category: - # envoy.matching.input_matchers] - custom_match: "___core_v3__.TypedExtensionConfig" = betterproto.message_field( - 3, group="matcher" - ) - - -@dataclass(eq=False, repr=False) -class MatcherMatcherListPredicatePredicateList(betterproto.Message): - """ - A list of two or more matchers. Used to allow using a list within a oneof. - """ - - predicate: List["MatcherMatcherListPredicate"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class MatcherMatcherListFieldMatcher(betterproto.Message): - """An individual matcher.""" - - # Determines if the match succeeds. - predicate: "MatcherMatcherListPredicate" = betterproto.message_field(1) - # What to do if the match succeeds. - on_match: "MatcherOnMatch" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class MatcherMatcherTree(betterproto.Message): - # Protocol-specific specification of input field to match on. - input: "___core_v3__.TypedExtensionConfig" = betterproto.message_field(1) - exact_match_map: "MatcherMatcherTreeMatchMap" = betterproto.message_field( - 2, group="tree_type" - ) - # Longest matching prefix wins. - prefix_match_map: "MatcherMatcherTreeMatchMap" = betterproto.message_field( - 3, group="tree_type" - ) - # Extension for custom matching logic. - custom_match: "___core_v3__.TypedExtensionConfig" = betterproto.message_field( - 4, group="tree_type" - ) - - -@dataclass(eq=False, repr=False) -class MatcherMatcherTreeMatchMap(betterproto.Message): - """ - A map of configured matchers. Used to allow using a map within a oneof. - """ - - map: Dict[str, "MatcherOnMatch"] = betterproto.map_field( - 1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - - -@dataclass(eq=False, repr=False) -class MatchPredicate(betterproto.Message): - """ - Match configuration. This is a recursive structure which allows complex - nested match configurations to be built using various logical operators. - [#next-free-field: 11] - """ - - # A set that describes a logical OR. If any member of the set matches, the - # match configuration matches. - or_match: "MatchPredicateMatchSet" = betterproto.message_field(1, group="rule") - # A set that describes a logical AND. If all members of the set match, the - # match configuration matches. - and_match: "MatchPredicateMatchSet" = betterproto.message_field(2, group="rule") - # A negation match. 
The match configuration will match if the negated match - # condition matches. - not_match: "MatchPredicate" = betterproto.message_field(3, group="rule") - # The match configuration will always match. - any_match: bool = betterproto.bool_field(4, group="rule") - # HTTP request headers match configuration. - http_request_headers_match: "HttpHeadersMatch" = betterproto.message_field( - 5, group="rule" - ) - # HTTP request trailers match configuration. - http_request_trailers_match: "HttpHeadersMatch" = betterproto.message_field( - 6, group="rule" - ) - # HTTP response headers match configuration. - http_response_headers_match: "HttpHeadersMatch" = betterproto.message_field( - 7, group="rule" - ) - # HTTP response trailers match configuration. - http_response_trailers_match: "HttpHeadersMatch" = betterproto.message_field( - 8, group="rule" - ) - # HTTP request generic body match configuration. - http_request_generic_body_match: "HttpGenericBodyMatch" = betterproto.message_field( - 9, group="rule" - ) - # HTTP response generic body match configuration. - http_response_generic_body_match: "HttpGenericBodyMatch" = ( - betterproto.message_field(10, group="rule") - ) - - -@dataclass(eq=False, repr=False) -class MatchPredicateMatchSet(betterproto.Message): - """A set of match configurations used for logical operations.""" - - # The list of rules that make up the set. - rules: List["MatchPredicate"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class HttpHeadersMatch(betterproto.Message): - """HTTP headers match configuration.""" - - # HTTP headers to match. - headers: List["___route_v3__.HeaderMatcher"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class HttpGenericBodyMatch(betterproto.Message): - """ - HTTP generic body match configuration. List of text strings and hex strings - to be located in HTTP body. All specified strings must be found in the HTTP - body for positive match. The search may be limited to specified number of - bytes from the body start. .. attention:: Searching for patterns in HTTP - body is potentially cpu intensive. For each specified pattern, http body is - scanned byte by byte to find a match. If multiple patterns are specified, - the process is repeated for each pattern. If location of a pattern is - known, ``bytes_limit`` should be specified to scan only part of the http - body. - """ - - # Limits search to specified number of bytes - default zero (no limit - match - # entire captured buffer). - bytes_limit: int = betterproto.uint32_field(1) - # List of patterns to match. - patterns: List["HttpGenericBodyMatchGenericTextMatch"] = betterproto.message_field( - 2 - ) - - -@dataclass(eq=False, repr=False) -class HttpGenericBodyMatchGenericTextMatch(betterproto.Message): - # Text string to be located in HTTP body. - string_match: str = betterproto.string_field(1, group="rule") - # Sequence of bytes to be located in HTTP body. 
- binary_match: bytes = betterproto.bytes_field(2, group="rule") - - -from .....type.matcher import v3 as ____type_matcher_v3__ -from ....core import v3 as ___core_v3__ -from ....route import v3 as ___route_v3__ diff --git a/src/envoy_data_plane/envoy/config/common/mutation_rules/__init__.py b/src/envoy_data_plane/envoy/config/common/mutation_rules/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/common/mutation_rules/v3/__init__.py b/src/envoy_data_plane/envoy/config/common/mutation_rules/v3/__init__.py deleted file mode 100644 index 60a46db..0000000 --- a/src/envoy_data_plane/envoy/config/common/mutation_rules/v3/__init__.py +++ /dev/null @@ -1,77 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/common/mutation_rules/v3/mutation_rules.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class HeaderMutationRules(betterproto.Message): - """ - The HeaderMutationRules structure specifies what headers may be manipulated - by a processing filter. This set of rules makes it possible to control - which modifications a filter may make. By default, an external processing - server may add, modify, or remove any header except for an "Envoy internal" - header (which is typically denoted by an x-envoy prefix) or specific - headers that may affect further filter processing: * host * :authority * - :scheme * :method Every attempt to add, change, append, or remove a header - will be tested against the rules here. Disallowed header mutations will be - ignored unless *disallow_is_error* is set to true. In addition, a counter - will be incremented whenever a mutation is rejected. In the ext_proc - filter, that counter is named "rejected_header_mutations". [#next-free- - field: 8] [#not-implemented-hide:] - """ - - # By default, certain headers that could affect processing of subsequent - # filters or request routing cannot be modified. These headers are "host", - # ":authority", ":scheme", and ":method". Setting this parameter to true - # allows these headers to be modified as well. - allow_all_routing: Optional[bool] = betterproto.message_field( - 1, wraps=betterproto.TYPE_BOOL - ) - # If true, allow modification of envoy internal headers. By default, these - # start with "x-envoy" but this may be overridden in the *Bootstrap* - # configuration using the :ref:`header_prefix - # ` field. - # Default is false. - allow_envoy: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) - # If true, prevent modification of any system header, defined as a header - # that starts with a ":" character, regardless of any other settings. A - # processing server may still override the ":status" of an HTTP response - # using an *ImmediateResponse* message. Default is false. - disallow_system: Optional[bool] = betterproto.message_field( - 3, wraps=betterproto.TYPE_BOOL - ) - # If true, prevent modifications of all header values, regardless of any - # other settings. A processing server may still override the ":status" of an - # HTTP response using an *ImmediateResponse* message. Default is false. - disallow_all: Optional[bool] = betterproto.message_field( - 4, wraps=betterproto.TYPE_BOOL - ) - # If set, specifically allow any header that matches this regular expression. 
- # This overrides all other settings except for *disallow_expression*. - allow_expression: "____type_matcher_v3__.RegexMatcher" = betterproto.message_field( - 5 - ) - # If set, specifically disallow any header that matches this regular - # expression regardless of any other settings. - disallow_expression: "____type_matcher_v3__.RegexMatcher" = ( - betterproto.message_field(6) - ) - # If true, and if the rules in this list cause a header mutation to be - # disallowed, then the filter using this configuration will terminate the - # request with a 500 error. In addition, regardless of the setting of this - # parameter, any attempt to set, add, or modify a disallowed header will - # cause the "rejected_header_mutations" counter to be incremented. Default is - # false. - disallow_is_error: Optional[bool] = betterproto.message_field( - 7, wraps=betterproto.TYPE_BOOL - ) - - -from .....type.matcher import v3 as ____type_matcher_v3__ diff --git a/src/envoy_data_plane/envoy/config/common/tap/__init__.py b/src/envoy_data_plane/envoy/config/common/tap/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/common/tap/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/common/tap/v2alpha/__init__.py deleted file mode 100644 index cc5e67a..0000000 --- a/src/envoy_data_plane/envoy/config/common/tap/v2alpha/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/common/tap/v2alpha/common.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class CommonExtensionConfig(betterproto.Message): - """Common configuration for all tap extensions.""" - - # If specified, the tap filter will be configured via an admin handler. - admin_config: "AdminConfig" = betterproto.message_field(1, group="config_type") - # If specified, the tap filter will be configured via a static configuration - # that cannot be changed. - static_config: "____service_tap_v2_alpha__.TapConfig" = betterproto.message_field( - 2, group="config_type" - ) - - -@dataclass(eq=False, repr=False) -class AdminConfig(betterproto.Message): - """ - Configuration for the admin handler. See :ref:`here - ` for more information. - """ - - # Opaque configuration ID. When requests are made to the admin handler, the - # passed opaque ID is matched to the configured filter opaque ID to determine - # which filter to configure. - config_id: str = betterproto.string_field(1) - - -from .....service.tap import v2alpha as ____service_tap_v2_alpha__ diff --git a/src/envoy_data_plane/envoy/config/core/__init__.py b/src/envoy_data_plane/envoy/config/core/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/core/v3/__init__.py b/src/envoy_data_plane/envoy/config/core/v3/__init__.py deleted file mode 100644 index a35e5d1..0000000 --- a/src/envoy_data_plane/envoy/config/core/v3/__init__.py +++ /dev/null @@ -1,2131 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/config/core/v3/address.proto, envoy/config/core/v3/backoff.proto, envoy/config/core/v3/base.proto, envoy/config/core/v3/config_source.proto, envoy/config/core/v3/event_service_config.proto, envoy/config/core/v3/extension.proto, envoy/config/core/v3/grpc_method_list.proto, envoy/config/core/v3/grpc_service.proto, envoy/config/core/v3/health_check.proto, envoy/config/core/v3/http_uri.proto, envoy/config/core/v3/protocol.proto, envoy/config/core/v3/proxy_protocol.proto, envoy/config/core/v3/resolver.proto, envoy/config/core/v3/socket_option.proto, envoy/config/core/v3/substitution_format_string.proto, envoy/config/core/v3/udp_socket_config.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import Dict, List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class SocketOptionSocketState(betterproto.Enum): - STATE_PREBIND = 0 - STATE_BOUND = 1 - STATE_LISTENING = 2 - - -class SocketAddressProtocol(betterproto.Enum): - TCP = 0 - UDP = 1 - - -class RoutingPriority(betterproto.Enum): - """ - Envoy supports :ref:`upstream priority routing - ` both at the route and the virtual - cluster level. The current priority implementation uses different - connection pool and circuit breaking settings for each priority level. This - means that even for HTTP/2 requests, two physical connections will be used - to an upstream host. In the future Envoy will likely support true HTTP/2 - priority over a single upstream connection. - """ - - DEFAULT = 0 - HIGH = 1 - - -class RequestMethod(betterproto.Enum): - """HTTP request method.""" - - METHOD_UNSPECIFIED = 0 - GET = 1 - HEAD = 2 - POST = 3 - PUT = 4 - DELETE = 5 - CONNECT = 6 - OPTIONS = 7 - TRACE = 8 - PATCH = 9 - - -class TrafficDirection(betterproto.Enum): - """Identifies the direction of the traffic relative to the local Envoy.""" - - # Default option is unspecified. - UNSPECIFIED = 0 - # The transport is used for incoming traffic. - INBOUND = 1 - # The transport is used for outgoing traffic. - OUTBOUND = 2 - - -class HeaderValueOptionHeaderAppendAction(betterproto.Enum): - APPEND_IF_EXISTS_OR_ADD = 0 - ADD_IF_ABSENT = 1 - OVERWRITE_IF_EXISTS_OR_ADD = 2 - - -class ApiVersion(betterproto.Enum): - """ - xDS API and non-xDS services version. This is used to describe both - resource and transport protocol versions (in distinct configuration - fields). - """ - - # When not specified, we assume v2, to ease migration to Envoy's stable API - # versioning. If a client does not support v2 (e.g. due to deprecation), this - # is an invalid value. - AUTO = 0 - # Use xDS v2 API. - V2 = 1 - # Use xDS v3 API. - V3 = 2 - - -class ApiConfigSourceApiType(betterproto.Enum): - DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 - REST = 1 - GRPC = 2 - DELTA_GRPC = 3 - AGGREGATED_GRPC = 5 - AGGREGATED_DELTA_GRPC = 6 - - -class ProxyProtocolConfigVersion(betterproto.Enum): - V1 = 0 - V2 = 1 - - -class HealthStatus(betterproto.Enum): - """Endpoint health status.""" - - # The health status is not known. This is interpreted by Envoy as *HEALTHY*. - UNKNOWN = 0 - # Healthy. - HEALTHY = 1 - # Unhealthy. - UNHEALTHY = 2 - # Connection draining in progress. E.g., - # ``_ or - # ``_. This is interpreted by Envoy as *UNHEALTHY*. - DRAINING = 3 - # Health check timed out. This is part of HDS and is interpreted by Envoy as - # *UNHEALTHY*. - TIMEOUT = 4 - # Degraded. 
- DEGRADED = 5 - - -class HttpProtocolOptionsHeadersWithUnderscoresAction(betterproto.Enum): - ALLOW = 0 - REJECT_REQUEST = 1 - DROP_HEADER = 2 - - -@dataclass(eq=False, repr=False) -class SocketOption(betterproto.Message): - """ - Generic socket option message. This would be used to set socket options - that might not exist in upstream kernels or precompiled Envoy binaries. - [#next-free-field: 7] - """ - - # An optional name to give this socket option for debugging, etc. Uniqueness - # is not required and no special meaning is assumed. - description: str = betterproto.string_field(1) - # Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP - level: int = betterproto.int64_field(2) - # The numeric name as passed to setsockopt - name: int = betterproto.int64_field(3) - # Because many sockopts take an int value. - int_value: int = betterproto.int64_field(4, group="value") - # Otherwise it's a byte buffer. - buf_value: bytes = betterproto.bytes_field(5, group="value") - # The state in which the option will be applied. When used in BindConfig - # STATE_PREBIND is currently the only valid value. - state: "SocketOptionSocketState" = betterproto.enum_field(6) - - -@dataclass(eq=False, repr=False) -class Pipe(betterproto.Message): - # Unix Domain Socket path. On Linux, paths starting with '@' will use the - # abstract namespace. The starting '@' is replaced by a null byte by Envoy. - # Paths starting with '@' will result in an error in environments other than - # Linux. - path: str = betterproto.string_field(1) - # The mode for the Pipe. Not applicable for abstract sockets. - mode: int = betterproto.uint32_field(2) - - -@dataclass(eq=False, repr=False) -class EnvoyInternalAddress(betterproto.Message): - """ - [#not-implemented-hide:] The address represents an envoy internal listener. - TODO(lambdai): Make this address available for listener and endpoint. - TODO(asraa): When address available, remove workaround from - test/server/server_fuzz_test.cc:30. - """ - - # [#not-implemented-hide:] The :ref:`listener name - # ` of the destination - # internal listener. - server_listener_name: str = betterproto.string_field( - 1, group="address_name_specifier" - ) - - -@dataclass(eq=False, repr=False) -class SocketAddress(betterproto.Message): - """[#next-free-field: 7]""" - - protocol: "SocketAddressProtocol" = betterproto.enum_field(1) - # The address for this socket. :ref:`Listeners ` will bind - # to the address. An empty address is not allowed. Specify ``0.0.0.0`` or - # ``::`` to bind to any address. [#comment:TODO(zuercher) reinstate when - # implemented: It is possible to distinguish a Listener address via the - # prefix/suffix matching in :ref:`FilterChainMatch - # `.] When used within - # an upstream :ref:`BindConfig `, - # the address controls the source address of outbound connections. For - # :ref:`clusters `, the cluster - # type determines whether the address must be an IP (*STATIC* or *EDS* - # clusters) or a hostname resolved by DNS (*STRICT_DNS* or *LOGICAL_DNS* - # clusters). Address resolution can be customized via :ref:`resolver_name - # `. - address: str = betterproto.string_field(2) - port_value: int = betterproto.uint32_field(3, group="port_specifier") - # This is only valid if :ref:`resolver_name - # ` is - # specified below and the named resolver is capable of named port resolution. - named_port: str = betterproto.string_field(4, group="port_specifier") - # The name of the custom resolver. This must have been registered with Envoy. 
- # If this is empty, a context dependent default applies. If the address is a - # concrete IP address, no resolution will occur. If address is a hostname - # this should be set for resolution other than DNS. Specifying a custom - # resolver with *STRICT_DNS* or *LOGICAL_DNS* will generate an error at - # runtime. - resolver_name: str = betterproto.string_field(5) - # When binding to an IPv6 address above, this enables `IPv4 compatibility - # `_. Binding to ``::`` will - # allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into - # IPv6 space as ``::FFFF:``. - ipv4_compat: bool = betterproto.bool_field(6) - - -@dataclass(eq=False, repr=False) -class TcpKeepalive(betterproto.Message): - # Maximum number of keepalive probes to send without response before deciding - # the connection is dead. Default is to use the OS level configuration - # (unless overridden, Linux defaults to 9.) - keepalive_probes: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # The number of seconds a connection needs to be idle before keep-alive - # probes start being sent. Default is to use the OS level configuration - # (unless overridden, Linux defaults to 7200s (i.e., 2 hours.) - keepalive_time: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # The number of seconds between keep-alive probes. Default is to use the OS - # level configuration (unless overridden, Linux defaults to 75s.) - keepalive_interval: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class BindConfig(betterproto.Message): - # The address to bind to when creating a socket. - source_address: "SocketAddress" = betterproto.message_field(1) - # Whether to set the *IP_FREEBIND* option when creating the socket. When this - # flag is set to true, allows the :ref:`source_address - # ` - # to be an IP address that is not configured on the system running Envoy. - # When this flag is set to false, the option *IP_FREEBIND* is disabled on the - # socket. When this flag is not set (default), the socket is not modified, - # i.e. the option is neither enabled nor disabled. - freebind: Optional[bool] = betterproto.message_field(2, wraps=betterproto.TYPE_BOOL) - # Additional socket options that may not be present in Envoy source code or - # precompiled binaries. - socket_options: List["SocketOption"] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class Address(betterproto.Message): - """ - Addresses specify either a logical or physical address and port, which are - used to tell Envoy where to bind/listen, connect to upstream and find - management servers. - """ - - socket_address: "SocketAddress" = betterproto.message_field(1, group="address") - pipe: "Pipe" = betterproto.message_field(2, group="address") - # [#not-implemented-hide:] - envoy_internal_address: "EnvoyInternalAddress" = betterproto.message_field( - 3, group="address" - ) - - -@dataclass(eq=False, repr=False) -class CidrRange(betterproto.Message): - """ - CidrRange specifies an IP Address and a prefix length to construct the - subnet mask for a `CIDR `_ range. - """ - - # IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - address_prefix: str = betterproto.string_field(1) - # Length of prefix, e.g. 0, 32. Defaults to 0 when unset. 
- prefix_len: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class BackoffStrategy(betterproto.Message): - """Configuration defining a jittered exponential back off strategy.""" - - # The base interval to be used for the next back off computation. It should - # be greater than zero and less than or equal to :ref:`max_interval - # `. - base_interval: timedelta = betterproto.message_field(1) - # Specifies the maximum interval between retries. This parameter is optional, - # but must be greater than or equal to the :ref:`base_interval - # ` if set. - # The default is 10 times the :ref:`base_interval - # `. - max_interval: timedelta = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class HttpUri(betterproto.Message): - """Envoy external URI descriptor""" - - # The HTTP server URI. It should be a full FQDN with protocol, host and path. - # Example: .. code-block:: yaml uri: - # https://www.googleapis.com/oauth2/v1/certs - uri: str = betterproto.string_field(1) - # A cluster is created in the Envoy "cluster_manager" config section. This - # field specifies the cluster name. Example: .. code-block:: yaml cluster: - # jwks_cluster - cluster: str = betterproto.string_field(2, group="http_upstream_type") - # Sets the maximum duration in milliseconds that a response can take to - # arrive upon request. - timeout: timedelta = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class Locality(betterproto.Message): - """ - Identifies location of where either Envoy runs or where upstream hosts run. - """ - - # Region this :ref:`zone ` - # belongs to. - region: str = betterproto.string_field(1) - # Defines the local service zone where Envoy is running. Though optional, it - # should be set if discovery service routing is used and the discovery - # service exposes :ref:`zone data - # `, - # either in this message or via :option:`--service-zone`. The meaning of zone - # is context dependent, e.g. `Availability Zone (AZ) - # `_ on AWS, `Zone - # `_ on GCP, etc. - zone: str = betterproto.string_field(2) - # When used for locality of upstream hosts, this field further splits zone - # into smaller chunks of sub-zones so they can be load balanced - # independently. - sub_zone: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class BuildVersion(betterproto.Message): - """ - BuildVersion combines SemVer version of extension with free-form build - information (i.e. 'alpha', 'private-build') as a set of strings. - """ - - # SemVer version of extension. - version: "___type_v3__.SemanticVersion" = betterproto.message_field(1) - # Free-form build information. Envoy defines several well known keys in the - # source/common/version/version.h file - metadata: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Extension(betterproto.Message): - """ - Version and identification for an Envoy extension. [#next-free-field: 6] - """ - - # This is the name of the Envoy filter as specified in the Envoy - # configuration, e.g. envoy.filters.http.router, com.acme.widget. - name: str = betterproto.string_field(1) - # Category of the extension. Extension category names use reverse DNS - # notation. For instance "envoy.filters.listener" for Envoy's built-in - # listener filters or "com.acme.filters.http" for HTTP filters from acme.com - # vendor. [#comment:TODO(yanavlasov): Link to the doc with existing envoy - # category names.] 
- category: str = betterproto.string_field(2) - # [#not-implemented-hide:] Type descriptor of extension configuration proto. - # [#comment:TODO(yanavlasov): Link to the doc with existing configuration - # protos.] [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] - type_descriptor: str = betterproto.string_field(3) - # The version is a property of the extension and maintained independently of - # other extensions and the Envoy API. This field is not set when extension - # did not provide version information. - version: "BuildVersion" = betterproto.message_field(4) - # Indicates that the extension is present but was disabled via dynamic - # configuration. - disabled: bool = betterproto.bool_field(5) - - -@dataclass(eq=False, repr=False) -class Node(betterproto.Message): - """ - Identifies a specific Envoy instance. The node identifier is presented to - the management server, which may use this identifier to distinguish per - Envoy configuration for serving. [#next-free-field: 13] - """ - - # An opaque node identifier for the Envoy node. This also provides the local - # service node name. It should be set if any of the following features are - # used: :ref:`statsd `, :ref:`CDS - # `, and :ref:`HTTP tracing - # `, either in this message or via :option:`--service- - # node`. - id: str = betterproto.string_field(1) - # Defines the local service cluster name where Envoy is running. Though - # optional, it should be set if any of the following features are used: - # :ref:`statsd `, :ref:`health check cluster - # verification `, :ref:`runtime override directory - # `, :ref:`user agent addition - # `, :ref:`HTTP global rate limiting - # `, :ref:`CDS `, - # and :ref:`HTTP tracing `, either in this message or - # via :option:`--service-cluster`. - cluster: str = betterproto.string_field(2) - # Opaque metadata extending the node identifier. Envoy will pass this - # directly to the management server. - metadata: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(3) - # Map from xDS resource type URL to dynamic context parameters. These may - # vary at runtime (unlike other fields in this message). For example, the xDS - # client may have a shard identifier that changes during the lifetime of the - # xDS client. In Envoy, this would be achieved by updating the dynamic - # context on the Server::Instance's LocalInfo context provider. The shard ID - # dynamic parameter then appears in this field during future discovery - # requests. - dynamic_parameters: Dict[ - str, "____xds_core_v3__.ContextParams" - ] = betterproto.map_field(12, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # Locality specifying where the Envoy instance is running. - locality: "Locality" = betterproto.message_field(4) - # Free-form string that identifies the entity requesting config. E.g. "envoy" - # or "grpc" - user_agent_name: str = betterproto.string_field(6) - # Free-form string that identifies the version of the entity requesting - # config. E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" - user_agent_version: str = betterproto.string_field( - 7, group="user_agent_version_type" - ) - # Structured version of the entity requesting config. - user_agent_build_version: "BuildVersion" = betterproto.message_field( - 8, group="user_agent_version_type" - ) - # List of extensions and their versions supported by the node. - extensions: List["Extension"] = betterproto.message_field(9) - # Client feature support list. 
These are well known features described in the - # Envoy API repository for a given major version of an API. Client features - # use reverse DNS naming scheme, for example `com.acme.feature`. See - # :ref:`the list of features ` that xDS client may support. - client_features: List[str] = betterproto.string_field(10) - # Known listening ports on the node as a generic hint to the management - # server for filtering :ref:`listeners ` to be returned. - # For example, if there is a listener bound to port 80, the list can - # optionally contain the SocketAddress `(0.0.0.0,80)`. The field is optional - # and just a hint. - listening_addresses: List["Address"] = betterproto.message_field(11) - - def __post_init__(self) -> None: - super().__post_init__() - if self.listening_addresses: - warnings.warn("Node.listening_addresses is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class Metadata(betterproto.Message): - """ - Metadata provides additional inputs to filters based on matched listeners, - filter chains, routes and endpoints. It is structured as a map, usually - from filter name (in reverse DNS format) to metadata specific to the - filter. Metadata key-values for a filter are merged as connection and - request handling occurs, with later values for the same key overriding - earlier values. An example use of metadata is providing additional values - to http_connection_manager in the envoy.http_connection_manager.access_log - namespace. Another example use of metadata is to per service config info in - cluster metadata, which may get consumed by multiple filters. For load - balancing, Metadata provides a means to subset cluster endpoints. Endpoints - have a Metadata object associated and routes contain a Metadata object to - match against. There are some well defined metadata used today for this - purpose: * ``{"envoy.lb": {"canary": }}`` This indicates the canary - status of an endpoint and is also used during header processing - (x-envoy-upstream-canary) and for stats purposes. [#next-major-version: - move to type/metadata/v2] - """ - - # Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* - # namespace is reserved for Envoy's built-in filters. If both - # *filter_metadata* and :ref:`typed_filter_metadata - # ` fields - # are present in the metadata with same keys, only *typed_filter_metadata* - # field will be parsed. - filter_metadata: Dict[ - str, "betterproto_lib_google_protobuf.Struct" - ] = betterproto.map_field(1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* - # namespace is reserved for Envoy's built-in filters. The value is encoded as - # google.protobuf.Any. If both :ref:`filter_metadata - # ` and - # *typed_filter_metadata* fields are present in the metadata with same keys, - # only *typed_filter_metadata* field will be parsed. - typed_filter_metadata: Dict[ - str, "betterproto_lib_google_protobuf.Any" - ] = betterproto.map_field(2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - - -@dataclass(eq=False, repr=False) -class RuntimeUInt32(betterproto.Message): - """Runtime derived uint32 with a default when not specified.""" - - # Default value if runtime value is not available. - default_value: int = betterproto.uint32_field(2) - # Runtime key to get value for comparison. This value is used if defined. 
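# An illustrative sketch of filling in the Node and Locality messages above,
# e.g. for the node identity presented to a management server. The module path
# and the example id/cluster values are assumptions.
import envoy_data_plane.envoy.config.core.v3 as core_v3

node = core_v3.Node(
    id="envoy-sidecar-1",
    cluster="payments",
    locality=core_v3.Locality(
        region="us-east-1", zone="us-east-1a", sub_zone="rack-7"
    ),
    user_agent_name="envoy",
)

# betterproto messages serialize with bytes() and round-trip with .parse().
wire = bytes(node)
assert core_v3.Node().parse(wire).id == "envoy-sidecar-1"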
- runtime_key: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class RuntimePercent(betterproto.Message): - """Runtime derived percentage with a default when not specified.""" - - # Default value if runtime value is not available. - default_value: "___type_v3__.Percent" = betterproto.message_field(1) - # Runtime key to get value for comparison. This value is used if defined. - runtime_key: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class RuntimeDouble(betterproto.Message): - """Runtime derived double with a default when not specified.""" - - # Default value if runtime value is not available. - default_value: float = betterproto.double_field(1) - # Runtime key to get value for comparison. This value is used if defined. - runtime_key: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class RuntimeFeatureFlag(betterproto.Message): - """Runtime derived bool with a default when not specified.""" - - # Default value if runtime value is not available. - default_value: Optional[bool] = betterproto.message_field( - 1, wraps=betterproto.TYPE_BOOL - ) - # Runtime key to get value for comparison. This value is used if defined. The - # boolean value must be represented via its `canonical JSON encoding - # `_. - runtime_key: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class QueryParameter(betterproto.Message): - """Query parameter name/value pair.""" - - # The key of the query parameter. Case sensitive. - key: str = betterproto.string_field(1) - # The value of the query parameter. - value: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class HeaderValue(betterproto.Message): - """Header name/value pair.""" - - # Header name. - key: str = betterproto.string_field(1) - # Header value. The same :ref:`format specifier ` - # as used for :ref:`HTTP access logging ` applies here, - # however unknown header values are replaced with the empty string instead of - # `-`. - value: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class HeaderValueOption(betterproto.Message): - """Header name/value pair plus option to control append behavior.""" - - # Header name/value pair that this option applies to. - header: "HeaderValue" = betterproto.message_field(1) - # Should the value be appended? If true (default), the value is appended to - # existing values. Otherwise it replaces any existing values. - append: Optional[bool] = betterproto.message_field(2, wraps=betterproto.TYPE_BOOL) - # [#not-implemented-hide:] Describes the action taken to append/overwrite the - # given value for an existing header or to only add this header if it's - # absent. Value defaults to :ref:`APPEND_IF_EXISTS_OR_ADD`. - append_action: "HeaderValueOptionHeaderAppendAction" = betterproto.enum_field(3) - - -@dataclass(eq=False, repr=False) -class HeaderMap(betterproto.Message): - """Wrapper for a set of headers.""" - - headers: List["HeaderValue"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class WatchedDirectory(betterproto.Message): - """ - A directory that is watched for changes, e.g. by inotify on Linux. - Move/rename events inside this directory trigger the watch. - """ - - # Directory path to watch. - path: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class DataSource(betterproto.Message): - """ - Data source consisting of a file, an inline value, or an environment - variable. - """ - - # Local filesystem data source. 
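# A short sketch of the header helper messages above (module path assumed to
# be envoy_data_plane.envoy.config.core.v3, matching this package's layout).
import envoy_data_plane.envoy.config.core.v3 as core_v3

# Append an extra request header; `append` is a wrapped bool, so a plain
# Python bool (or None for "unset") is accepted.
hdr_opt = core_v3.HeaderValueOption(
    header=core_v3.HeaderValue(key="x-env", value="staging"),
    append=True,
)

headers = core_v3.HeaderMap(headers=[hdr_opt.header])
query = core_v3.QueryParameter(key="debug", value="true")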
- filename: str = betterproto.string_field(1, group="specifier") - # Bytes inlined in the configuration. - inline_bytes: bytes = betterproto.bytes_field(2, group="specifier") - # String inlined in the configuration. - inline_string: str = betterproto.string_field(3, group="specifier") - # Environment variable data source. - environment_variable: str = betterproto.string_field(4, group="specifier") - - -@dataclass(eq=False, repr=False) -class RetryPolicy(betterproto.Message): - """ - The message specifies the retry policy of remote data source when fetching - fails. - """ - - # Specifies parameters that control :ref:`retry backoff strategy - # `. This parameter is - # optional, in which case the default base interval is 1000 milliseconds. The - # default maximum interval is 10 times the base interval. - retry_back_off: "BackoffStrategy" = betterproto.message_field(1) - # Specifies the allowed number of retries. This parameter is optional and - # defaults to 1. - num_retries: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class RemoteDataSource(betterproto.Message): - """ - The message specifies how to fetch data from remote and how to verify it. - """ - - # The HTTP URI to fetch the remote data. - http_uri: "HttpUri" = betterproto.message_field(1) - # SHA256 string for verifying data. - sha256: str = betterproto.string_field(2) - # Retry policy for fetching remote data. - retry_policy: "RetryPolicy" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class AsyncDataSource(betterproto.Message): - """Async data source which support async data fetch.""" - - # Local async data source. - local: "DataSource" = betterproto.message_field(1, group="specifier") - # Remote async data source. - remote: "RemoteDataSource" = betterproto.message_field(2, group="specifier") - - -@dataclass(eq=False, repr=False) -class TransportSocket(betterproto.Message): - """ - Configuration for transport socket in :ref:`listeners ` - and :ref:`clusters `. If the - configuration is empty, a default transport socket implementation and - configuration will be chosen based on the platform and existence of - tls_context. - """ - - # The name of the transport socket to instantiate. The name must match a - # supported transport socket implementation. - name: str = betterproto.string_field(1) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - -@dataclass(eq=False, repr=False) -class RuntimeFractionalPercent(betterproto.Message): - """ - Runtime derived FractionalPercent with defaults for when the numerator or - denominator is not specified via a runtime key. .. note:: Parsing of the - runtime key's data is implemented such that it may be represented as a - :ref:`FractionalPercent ` proto - represented as JSON/YAML and may also be represented as an integer with - the assumption that the value is an integral percentage out of 100. For - instance, a runtime key lookup returning the value "42" would parse as a - `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. - """ - - # Default value if the runtime value's for the numerator/denominator keys are - # not available. - default_value: "___type_v3__.FractionalPercent" = betterproto.message_field(1) - # Runtime key for a YAML representation of a FractionalPercent. 
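# Sketch of the data-source messages above. `filename`, `inline_string`, etc.
# are members of the `specifier` oneof, so only one should be set; betterproto's
# which_one_of() reports which member is populated. Module path and file paths
# are assumptions; the URI/cluster values mirror the doc comments above.
from datetime import timedelta

import betterproto
import envoy_data_plane.envoy.config.core.v3 as core_v3

local = core_v3.DataSource(filename="/etc/envoy/certs/ca.pem")
assert betterproto.which_one_of(local, "specifier")[0] == "filename"

remote = core_v3.RemoteDataSource(
    http_uri=core_v3.HttpUri(
        uri="https://www.googleapis.com/oauth2/v1/certs",
        cluster="jwks_cluster",
        timeout=timedelta(seconds=5),
    ),
    sha256="<expected-sha256>",
    retry_policy=core_v3.RetryPolicy(
        retry_back_off=core_v3.BackoffStrategy(
            base_interval=timedelta(milliseconds=1000),
            max_interval=timedelta(seconds=10),
        ),
        num_retries=3,
    ),
)

async_src = core_v3.AsyncDataSource(remote=remote)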
- runtime_key: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class ControlPlane(betterproto.Message): - """ - Identifies a specific ControlPlane instance that Envoy is connected to. - """ - - # An opaque control plane identifier that uniquely identifies an instance of - # control plane. This can be used to identify which control plane instance, - # the Envoy is connected to. - identifier: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class GrpcService(betterproto.Message): - """ - gRPC service configuration. This is used by :ref:`ApiConfigSource - ` and filter - configurations. [#next-free-field: 6] - """ - - # Envoy's in-built gRPC client. See the :ref:`gRPC services overview - # ` documentation for discussion on gRPC client - # selection. - envoy_grpc: "GrpcServiceEnvoyGrpc" = betterproto.message_field( - 1, group="target_specifier" - ) - # `Google C++ gRPC client `_ See the :ref:`gRPC - # services overview ` documentation for - # discussion on gRPC client selection. - google_grpc: "GrpcServiceGoogleGrpc" = betterproto.message_field( - 2, group="target_specifier" - ) - # The timeout for the gRPC request. This is the timeout for a specific - # request. - timeout: timedelta = betterproto.message_field(3) - # Additional metadata to include in streams initiated to the GrpcService. - # This can be used for scenarios in which additional ad hoc authorization - # headers (e.g. ``x-foo-bar: baz-key``) are to be injected. For more - # information, including details on header value syntax, see the - # documentation on :ref:`custom request headers - # `. - initial_metadata: List["HeaderValue"] = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class GrpcServiceEnvoyGrpc(betterproto.Message): - # The name of the upstream gRPC cluster. SSL credentials will be supplied in - # the :ref:`Cluster ` - # :ref:`transport_socket - # `. - cluster_name: str = betterproto.string_field(1) - # The `:authority` header in the grpc request. If this field is not set, the - # authority header value will be `cluster_name`. Note that this authority - # does not override the SNI. The SNI is provided by the transport socket of - # the cluster. - authority: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpc(betterproto.Message): - """[#next-free-field: 9]""" - - # The target URI when using the `Google C++ gRPC client - # `_. SSL credentials will be supplied in - # :ref:`channel_credentials `. - target_uri: str = betterproto.string_field(1) - channel_credentials: "GrpcServiceGoogleGrpcChannelCredentials" = ( - betterproto.message_field(2) - ) - # A set of call credentials that can be composed with `channel credentials - # `_. - call_credentials: List[ - "GrpcServiceGoogleGrpcCallCredentials" - ] = betterproto.message_field(3) - # The human readable prefix to use when emitting statistics for the gRPC - # service. .. csv-table:: :header: Name, Type, Description :widths: 1, - # 1, 2 streams_total, Counter, Total number of streams opened - # streams_closed_, Counter, Total streams closed with - stat_prefix: str = betterproto.string_field(4) - # The name of the Google gRPC credentials factory to use. This must have been - # registered with Envoy. If this is empty, a default credentials factory will - # be used that sets up channel credentials based on other configuration - # parameters. 
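# Sketch of pointing a GrpcService at Envoy's in-built gRPC client, i.e. the
# `envoy_grpc` member of the `target_specifier` oneof above. Module path and
# the cluster name are assumptions; the metadata key/value mirrors the doc
# comment above.
from datetime import timedelta

import envoy_data_plane.envoy.config.core.v3 as core_v3

xds_grpc = core_v3.GrpcService(
    envoy_grpc=core_v3.GrpcServiceEnvoyGrpc(cluster_name="xds_cluster"),
    timeout=timedelta(seconds=5),
    # Ad hoc metadata added to every stream opened to this service.
    initial_metadata=[core_v3.HeaderValue(key="x-foo-bar", value="baz-key")],
)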
- credentials_factory_name: str = betterproto.string_field(5) - # Additional configuration for site-specific customizations of the Google - # gRPC library. - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(6) - # How many bytes each stream can buffer internally. If not set an - # implementation defined default is applied (1MiB). - per_stream_buffer_limit_bytes: Optional[int] = betterproto.message_field( - 7, wraps=betterproto.TYPE_UINT32 - ) - # Custom channels args. - channel_args: "GrpcServiceGoogleGrpcChannelArgs" = betterproto.message_field(8) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcSslCredentials(betterproto.Message): - """ - See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. - """ - - # PEM encoded server root certificates. - root_certs: "DataSource" = betterproto.message_field(1) - # PEM encoded client private key. - private_key: "DataSource" = betterproto.message_field(2) - # PEM encoded client certificate chain. - cert_chain: "DataSource" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcGoogleLocalCredentials(betterproto.Message): - """ - Local channel credentials. Only UDS is supported for now. See - https://github.com/grpc/grpc/pull/15909. - """ - - pass - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcChannelCredentials(betterproto.Message): - """ - See https://grpc.io/docs/guides/auth.html#credential-types to understand - Channel and Call credential types. - """ - - ssl_credentials: "GrpcServiceGoogleGrpcSslCredentials" = betterproto.message_field( - 1, group="credential_specifier" - ) - # https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f - # 61 - google_default: "betterproto_lib_google_protobuf.Empty" = betterproto.message_field( - 2, group="credential_specifier" - ) - local_credentials: "GrpcServiceGoogleGrpcGoogleLocalCredentials" = ( - betterproto.message_field(3, group="credential_specifier") - ) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcCallCredentials(betterproto.Message): - """[#next-free-field: 8]""" - - # Access token credentials. https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a8 - # 0da696ffdaea943f0f858d7a360d. - access_token: str = betterproto.string_field(1, group="credential_specifier") - # Google Compute Engine credentials. https://grpc.io/grpc/cpp/namespacegrpc.h - # tml#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - google_compute_engine: "betterproto_lib_google_protobuf.Empty" = ( - betterproto.message_field(2, group="credential_specifier") - ) - # Google refresh token credentials. https://grpc.io/grpc/cpp/namespacegrpc.ht - # ml#a96901c997b91bc6513b08491e0dca37c. - google_refresh_token: str = betterproto.string_field( - 3, group="credential_specifier" - ) - # Service Account JWT Access credentials. https://grpc.io/grpc/cpp/namespaceg - # rpc.html#a92a9f959d6102461f66ee973d8e9d3aa. - service_account_jwt_access: "GrpcServiceGoogleGrpcCallCredentialsServiceAccountJwtAccessCredentials" = betterproto.message_field( - 4, group="credential_specifier" - ) - # Google IAM credentials. https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc - # 101b41e680d47028166e76f9d0. - google_iam: "GrpcServiceGoogleGrpcCallCredentialsGoogleIamCredentials" = ( - betterproto.message_field(5, group="credential_specifier") - ) - # Custom authenticator credentials. https://grpc.io/grpc/cpp/namespacegrpc.ht - # ml#a823c6a4b19ffc71fb33e90154ee2ad07. 
- # https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other- - # authentication-mechanisms. - from_plugin: "GrpcServiceGoogleGrpcCallCredentialsMetadataCredentialsFromPlugin" = ( - betterproto.message_field(6, group="credential_specifier") - ) - # Custom security token service which implements OAuth 2.0 token exchange. - # https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 See - # https://github.com/grpc/grpc/pull/19587. - sts_service: "GrpcServiceGoogleGrpcCallCredentialsStsService" = ( - betterproto.message_field(7, group="credential_specifier") - ) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcCallCredentialsServiceAccountJwtAccessCredentials( - betterproto.Message -): - json_key: str = betterproto.string_field(1) - token_lifetime_seconds: int = betterproto.uint64_field(2) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcCallCredentialsGoogleIamCredentials(betterproto.Message): - authorization_token: str = betterproto.string_field(1) - authority_selector: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcCallCredentialsMetadataCredentialsFromPlugin( - betterproto.Message -): - name: str = betterproto.string_field(1) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcCallCredentialsStsService(betterproto.Message): - """ - Security token service configuration that allows Google gRPC to fetch - security token from an OAuth 2.0 authorization server. See - https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and - https://github.com/grpc/grpc/pull/19587. [#next-free-field: 10] - """ - - # URI of the token exchange service that handles token exchange requests. - # [#comment:TODO(asraa): Add URI validation when implemented. Tracked by - # https://github.com/envoyproxy/protoc-gen-validate/issues/303] - token_exchange_service_uri: str = betterproto.string_field(1) - # Location of the target service or resource where the client intends to use - # the requested security token. - resource: str = betterproto.string_field(2) - # Logical name of the target service where the client intends to use the - # requested security token. - audience: str = betterproto.string_field(3) - # The desired scope of the requested security token in the context of the - # service or resource where the token will be used. - scope: str = betterproto.string_field(4) - # Type of the requested security token. - requested_token_type: str = betterproto.string_field(5) - # The path of subject token, a security token that represents the identity of - # the party on behalf of whom the request is being made. - subject_token_path: str = betterproto.string_field(6) - # Type of the subject token. - subject_token_type: str = betterproto.string_field(7) - # The path of actor token, a security token that represents the identity of - # the acting party. The acting party is authorized to use the requested - # security token and act on behalf of the subject. - actor_token_path: str = betterproto.string_field(8) - # Type of the actor token. - actor_token_type: str = betterproto.string_field(9) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcChannelArgs(betterproto.Message): - """Channel arguments.""" - - # See grpc_types.h GRPC_ARG #defines for keys that work here. 
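# Sketch of the Google C++ gRPC client variant with SSL channel credentials
# (the `ssl_credentials` member of the `credential_specifier` oneof above).
# Module path, target_uri, and certificate locations are assumptions.
import envoy_data_plane.envoy.config.core.v3 as core_v3

google_grpc = core_v3.GrpcServiceGoogleGrpc(
    target_uri="sds.example.com:8234",
    stat_prefix="sds",
    channel_credentials=core_v3.GrpcServiceGoogleGrpcChannelCredentials(
        ssl_credentials=core_v3.GrpcServiceGoogleGrpcSslCredentials(
            root_certs=core_v3.DataSource(filename="/etc/ssl/ca.pem"),
            private_key=core_v3.DataSource(filename="/etc/ssl/client.key"),
            cert_chain=core_v3.DataSource(filename="/etc/ssl/client.crt"),
        )
    ),
)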
- args: Dict[str, "GrpcServiceGoogleGrpcChannelArgsValue"] = betterproto.map_field( - 1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - - -@dataclass(eq=False, repr=False) -class GrpcServiceGoogleGrpcChannelArgsValue(betterproto.Message): - string_value: str = betterproto.string_field(1, group="value_specifier") - int_value: int = betterproto.int64_field(2, group="value_specifier") - - -@dataclass(eq=False, repr=False) -class ApiConfigSource(betterproto.Message): - """ - API configuration source. This identifies the API type and cluster that - Envoy will use to fetch an xDS API. [#next-free-field: 9] - """ - - # API type (gRPC, REST, delta gRPC) - api_type: "ApiConfigSourceApiType" = betterproto.enum_field(1) - # API version for xDS transport protocol. This describes the xDS gRPC/REST - # endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - transport_api_version: "ApiVersion" = betterproto.enum_field(8) - # Cluster names should be used only with REST. If > 1 cluster is defined, - # clusters will be cycled through if any kind of failure occurs. .. note:: - # The cluster with name ``cluster_name`` must be statically defined and its - # type must not be ``EDS``. - cluster_names: List[str] = betterproto.string_field(2) - # Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, - # services will be cycled through if any kind of failure occurs. - grpc_services: List["GrpcService"] = betterproto.message_field(4) - # For REST APIs, the delay between successive polls. - refresh_delay: timedelta = betterproto.message_field(3) - # For REST APIs, the request timeout. If not set, a default value of 1s will - # be used. - request_timeout: timedelta = betterproto.message_field(5) - # For GRPC APIs, the rate limit settings. If present, discovery requests made - # by Envoy will be rate limited. - rate_limit_settings: "RateLimitSettings" = betterproto.message_field(6) - # Skip the node identifier in subsequent discovery requests for streaming - # gRPC config types. - set_node_on_first_message_only: bool = betterproto.bool_field(7) - - -@dataclass(eq=False, repr=False) -class AggregatedConfigSource(betterproto.Message): - """ - Aggregated Discovery Service (ADS) options. This is currently empty, but - when set in :ref:`ConfigSource - ` can be used to specify that - ADS is to be used. - """ - - pass - - -@dataclass(eq=False, repr=False) -class SelfConfigSource(betterproto.Message): - """ - [#not-implemented-hide:] Self-referencing config source options. This is - currently empty, but when set in :ref:`ConfigSource - ` can be used to specify that - other data can be obtained from the same server. - """ - - # API version for xDS transport protocol. This describes the xDS gRPC/REST - # endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - transport_api_version: "ApiVersion" = betterproto.enum_field(1) - - -@dataclass(eq=False, repr=False) -class RateLimitSettings(betterproto.Message): - """ - Rate Limit settings to be applied for discovery requests made by Envoy. - """ - - # Maximum number of tokens to be used for rate limiting discovery request - # calls. If not set, a default value of 100 will be used. - max_tokens: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # Rate at which tokens will be filled per second. If not set, a default fill - # rate of 10 tokens per second will be used. 
- fill_rate: Optional[float] = betterproto.message_field( - 2, wraps=betterproto.TYPE_DOUBLE - ) - - -@dataclass(eq=False, repr=False) -class ConfigSource(betterproto.Message): - """ - Configuration for :ref:`listeners `, :ref:`clusters - `, :ref:`routes - `, :ref:`endpoints - ` etc. may either be sourced from the - filesystem or from an xDS API source. Filesystem configs are watched with - inotify for updates. [#next-free-field: 8] - """ - - # Authorities that this config source may be used for. An authority specified - # in a xdstp:// URL is resolved to a *ConfigSource* prior to configuration - # fetch. This field provides the association between authority name and - # configuration source. [#not-implemented-hide:] - authorities: List["____xds_core_v3__.Authority"] = betterproto.message_field(7) - # Path on the filesystem to source and watch for configuration updates. When - # sourcing configuration for :ref:`secret - # `, the - # certificate and key files are also watched for updates. .. note:: The path - # to the source must exist at config load time. .. note:: Envoy will only - # watch the file path for *moves.* This is because in general only moves - # are atomic. The same method of swapping files as is demonstrated in the - # :ref:`runtime documentation ` can be - # used here also. - path: str = betterproto.string_field(1, group="config_source_specifier") - # API configuration source. - api_config_source: "ApiConfigSource" = betterproto.message_field( - 2, group="config_source_specifier" - ) - # When set, ADS will be used to fetch resources. The ADS API configuration - # source in the bootstrap configuration is used. - ads: "AggregatedConfigSource" = betterproto.message_field( - 3, group="config_source_specifier" - ) - # [#not-implemented-hide:] When set, the client will access the resources - # from the same server it got the ConfigSource from, although not necessarily - # from the same stream. This is similar to the - # :ref:`ads` field, except that the - # client may use a different stream to the same server. As a result, this - # field can be used for things like LRS that cannot be sent on an ADS stream. - # It can also be used to link from (e.g.) LDS to RDS on the same server - # without requiring the management server to know its name or required - # credentials. [#next-major-version: In xDS v3, consider replacing the ads - # field with this one, since this field can implicitly mean to use the same - # stream in the case where the ConfigSource is provided via ADS and the - # specified data can also be obtained via ADS.] - self: "SelfConfigSource" = betterproto.message_field( - 5, group="config_source_specifier" - ) - # When this timeout is specified, Envoy will wait no longer than the - # specified time for first config response on this xDS subscription during - # the :ref:`initialization process `. After - # reaching the timeout, Envoy will move to the next initialization phase, - # even if the first config is not delivered yet. The timer is activated when - # the xDS API subscription starts, and is disarmed on first config update or - # on error. 0 means no timeout - Envoy will wait indefinitely for the first - # xDS config (unless another timeout applies). The default is 15s. - initial_fetch_timeout: timedelta = betterproto.message_field(4) - # API version for xDS resources. This implies the type URLs that the client - # will request for resources and the resource type that the client will in - # turn expect to be delivered. 
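# Sketch of wiring a ConfigSource to an ApiConfigSource for gRPC xDS, using
# the messages above. The module path and the enum member names
# (ApiConfigSourceApiType.GRPC, ApiVersion.V3) are assumptions; those enums
# are defined outside this hunk.
from datetime import timedelta

import envoy_data_plane.envoy.config.core.v3 as core_v3

grpc_xds = core_v3.ApiConfigSource(
    api_type=core_v3.ApiConfigSourceApiType.GRPC,
    transport_api_version=core_v3.ApiVersion.V3,
    grpc_services=[
        core_v3.GrpcService(
            envoy_grpc=core_v3.GrpcServiceEnvoyGrpc(cluster_name="xds_cluster")
        )
    ],
    set_node_on_first_message_only=True,
)

cds_config = core_v3.ConfigSource(
    api_config_source=grpc_xds,
    initial_fetch_timeout=timedelta(seconds=15),
)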
- resource_api_version: "ApiVersion" = betterproto.enum_field(6) - - -@dataclass(eq=False, repr=False) -class TypedExtensionConfig(betterproto.Message): - """ - Message type for extension configuration. [#next-major-version: revisit all - existing typed_config that doesn't use this wrapper.]. - """ - - # The name of an extension. This is not used to select the extension, instead - # it serves the role of an opaque identifier. - name: str = betterproto.string_field(1) - # The typed config for the extension. The type URL will be used to identify - # the extension. In the case that the type URL is *xds.type.v3.TypedStruct* - # (or, for historical reasons, *udpa.type.v1.TypedStruct*), the inner type - # URL of *TypedStruct* will be utilized. See the :ref:`extension - # configuration overview ` for - # further details. - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ExtensionConfigSource(betterproto.Message): - """ - Configuration source specifier for a late-bound extension configuration. - The parent resource is warmed until all the initial extension - configurations are received, unless the flag to apply the default - configuration is set. Subsequent extension updates are atomic on a per- - worker basis. Once an extension configuration is applied to a request or a - connection, it remains constant for the duration of processing. If the - initial delivery of the extension configuration fails, due to a timeout for - example, the optional default configuration is applied. Without a default - configuration, the extension is disabled, until an extension configuration - is received. The behavior of a disabled extension depends on the context. - For example, a filter chain with a disabled extension filter rejects all - incoming streams. - """ - - config_source: "ConfigSource" = betterproto.message_field(1) - # Optional default configuration to use as the initial configuration if there - # is a failure to receive the initial extension configuration or if - # `apply_default_config_without_warming` flag is set. - default_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - # Use the default config as the initial configuration without warming and - # waiting for the first discovery response. Requires the default - # configuration to be supplied. - apply_default_config_without_warming: bool = betterproto.bool_field(3) - # A set of permitted extension type URLs. Extension configuration updates are - # rejected if they do not match any type URL in the set. - type_urls: List[str] = betterproto.string_field(4) - - -@dataclass(eq=False, repr=False) -class ProxyProtocolConfig(betterproto.Message): - # The PROXY protocol version to use. See - # https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details - version: "ProxyProtocolConfigVersion" = betterproto.enum_field(1) - - -@dataclass(eq=False, repr=False) -class EventServiceConfig(betterproto.Message): - """ - [#not-implemented-hide:] Configuration of the event reporting service - endpoint. - """ - - # Specifies the gRPC service that hosts the event reporting service. - grpc_service: "GrpcService" = betterproto.message_field( - 1, group="config_source_specifier" - ) - - -@dataclass(eq=False, repr=False) -class HealthCheck(betterproto.Message): - """[#next-free-field: 25]""" - - # The time to wait for a health check response. If the timeout is reached the - # health check attempt will be considered a failure. 
- timeout: timedelta = betterproto.message_field(1) - # The interval between health checks. - interval: timedelta = betterproto.message_field(2) - # An optional jitter amount in milliseconds. If specified, Envoy will start - # health checking after for a random time in ms between 0 and initial_jitter. - # This only applies to the first health check. - initial_jitter: timedelta = betterproto.message_field(20) - # An optional jitter amount in milliseconds. If specified, during every - # interval Envoy will add interval_jitter to the wait time. - interval_jitter: timedelta = betterproto.message_field(3) - # An optional jitter amount as a percentage of interval_ms. If specified, - # during every interval Envoy will add interval_ms * interval_jitter_percent - # / 100 to the wait time. If interval_jitter_ms and interval_jitter_percent - # are both set, both of them will be used to increase the wait time. - interval_jitter_percent: int = betterproto.uint32_field(18) - # The number of unhealthy health checks required before a host is marked - # unhealthy. Note that for *http* health checking if a host responds with a - # code not in :ref:`expected_statuses ` or :ref:`retriable_statuses `, this threshold is ignored and the host is considered immediately - # unhealthy. - unhealthy_threshold: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # The number of healthy health checks required before a host is marked - # healthy. Note that during startup, only a single successful health check is - # required to mark a host healthy. - healthy_threshold: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) - # [#not-implemented-hide:] Non-serving port for health checking. - alt_port: Optional[int] = betterproto.message_field( - 6, wraps=betterproto.TYPE_UINT32 - ) - # Reuse health check connection between health checks. Default is true. - reuse_connection: Optional[bool] = betterproto.message_field( - 7, wraps=betterproto.TYPE_BOOL - ) - # HTTP health check. - http_health_check: "HealthCheckHttpHealthCheck" = betterproto.message_field( - 8, group="health_checker" - ) - # TCP health check. - tcp_health_check: "HealthCheckTcpHealthCheck" = betterproto.message_field( - 9, group="health_checker" - ) - # gRPC health check. - grpc_health_check: "HealthCheckGrpcHealthCheck" = betterproto.message_field( - 11, group="health_checker" - ) - # Custom health check. - custom_health_check: "HealthCheckCustomHealthCheck" = betterproto.message_field( - 13, group="health_checker" - ) - # The "no traffic interval" is a special health check interval that is used - # when a cluster has never had traffic routed to it. This lower interval - # allows cluster information to be kept up to date, without sending a - # potentially large amount of active health checking traffic for no reason. - # Once a cluster has been used for traffic routing, Envoy will shift back to - # using the standard health check interval that is defined. Note that this - # interval takes precedence over any other. The default value for "no traffic - # interval" is 60 seconds. - no_traffic_interval: timedelta = betterproto.message_field(12) - # The "no traffic healthy interval" is a special health check interval that - # is used for hosts that are currently passing active health checking - # (including new hosts) when the cluster has received no traffic. 
This is - # useful for when we want to send frequent health checks with - # `no_traffic_interval` but then revert to lower frequency - # `no_traffic_healthy_interval` once a host in the cluster is marked as - # healthy. Once a cluster has been used for traffic routing, Envoy will shift - # back to using the standard health check interval that is defined. If - # no_traffic_healthy_interval is not set, it will default to the no traffic - # interval and send that interval regardless of health state. - no_traffic_healthy_interval: timedelta = betterproto.message_field(24) - # The "unhealthy interval" is a health check interval that is used for hosts - # that are marked as unhealthy. As soon as the host is marked as healthy, - # Envoy will shift back to using the standard health check interval that is - # defined. The default value for "unhealthy interval" is the same as - # "interval". - unhealthy_interval: timedelta = betterproto.message_field(14) - # The "unhealthy edge interval" is a special health check interval that is - # used for the first health check right after a host is marked as unhealthy. - # For subsequent health checks Envoy will shift back to using either - # "unhealthy interval" if present or the standard health check interval that - # is defined. The default value for "unhealthy edge interval" is the same as - # "unhealthy interval". - unhealthy_edge_interval: timedelta = betterproto.message_field(15) - # The "healthy edge interval" is a special health check interval that is used - # for the first health check right after a host is marked as healthy. For - # subsequent health checks Envoy will shift back to using the standard health - # check interval that is defined. The default value for "healthy edge - # interval" is the same as the default interval. - healthy_edge_interval: timedelta = betterproto.message_field(16) - # Specifies the path to the :ref:`health check event log - # `. If empty, no event log will be - # written. - event_log_path: str = betterproto.string_field(17) - # [#not-implemented-hide:] The gRPC service for the health check event - # service. If empty, health check events won't be sent to a remote endpoint. - event_service: "EventServiceConfig" = betterproto.message_field(22) - # If set to true, health check failure events will always be logged. If set - # to false, only the initial health check failure event will be logged. The - # default value is false. - always_log_health_check_failures: bool = betterproto.bool_field(19) - # This allows overriding the cluster TLS settings, just for health check - # connections. - tls_options: "HealthCheckTlsOptions" = betterproto.message_field(21) - # Optional key/value pairs that will be used to match a transport socket from - # those specified in the cluster's :ref:`tranport socket matches - # `. - # For example, the following match criteria .. code-block:: yaml - # transport_socket_match_criteria: useMTLS: true Will match the following - # :ref:`cluster socket match - # ` .. code- - # block:: yaml transport_socket_matches: - name: "useMTLS" match: - # useMTLS: true transport_socket: name: envoy.transport_sockets.tls - # config: { ... } # tls socket configuration If this field is set, then for - # health checks it will supersede an entry of *envoy.transport_socket* in the - # :ref:`LbEndpoint.Metadata - # `. This allows - # using different transport socket capabilities for health checking versus - # proxying to the endpoint. 
If the key/values pairs specified do not match - # any :ref:`transport socket matches - # `, - # the cluster's :ref:`transport socket - # ` will be - # used for health check socket configuration. - transport_socket_match_criteria: "betterproto_lib_google_protobuf.Struct" = ( - betterproto.message_field(23) - ) - - -@dataclass(eq=False, repr=False) -class HealthCheckPayload(betterproto.Message): - """Describes the encoding of the payload bytes in the payload.""" - - # Hex encoded payload. E.g., "000000FF". - text: str = betterproto.string_field(1, group="payload") - # [#not-implemented-hide:] Binary payload. - binary: bytes = betterproto.bytes_field(2, group="payload") - - -@dataclass(eq=False, repr=False) -class HealthCheckHttpHealthCheck(betterproto.Message): - """[#next-free-field: 13]""" - - # The value of the host header in the HTTP health check request. If left - # empty (default value), the name of the cluster this health check is - # associated with will be used. The host header can be customized for a - # specific endpoint by setting the :ref:`hostname ` field. - host: str = betterproto.string_field(1) - # Specifies the HTTP path that will be requested during health checking. For - # example */healthcheck*. - path: str = betterproto.string_field(2) - # [#not-implemented-hide:] HTTP specific payload. - send: "HealthCheckPayload" = betterproto.message_field(3) - # [#not-implemented-hide:] HTTP specific response. - receive: "HealthCheckPayload" = betterproto.message_field(4) - # Specifies a list of HTTP headers that should be added to each request that - # is sent to the health checked cluster. For more information, including - # details on header value syntax, see the documentation on :ref:`custom - # request headers `. - request_headers_to_add: List["HeaderValueOption"] = betterproto.message_field(6) - # Specifies a list of HTTP headers that should be removed from each request - # that is sent to the health checked cluster. - request_headers_to_remove: List[str] = betterproto.string_field(8) - # Specifies a list of HTTP response statuses considered healthy. If provided, - # replaces default 200-only policy - 200 must be included explicitly as - # needed. Ranges follow half-open semantics of :ref:`Int64Range - # `. The start and end of each range are - # required. Only statuses in the range [100, 600) are allowed. - expected_statuses: List["___type_v3__.Int64Range"] = betterproto.message_field(9) - # Specifies a list of HTTP response statuses considered retriable. If - # provided, responses in this range will count towards the configured - # :ref:`unhealthy_threshold - # `, but - # will not result in the host being considered immediately unhealthy. Ranges - # follow half-open semantics of :ref:`Int64Range - # `. The start and end of each range are - # required. Only statuses in the range [100, 600) are allowed. The - # :ref:`expected_statuses ` field takes precedence for any range - # overlaps with this field i.e. if status code 200 is both retriable and - # expected, a 200 response will be considered a successful health check. By - # default all responses not in :ref:`expected_statuses ` will result in - # the host being considered immediately unhealthy i.e. if status code 200 is - # expected and there are no configured retriable statuses, any non-200 - # response will result in the host being marked unhealthy. - retriable_statuses: List["___type_v3__.Int64Range"] = betterproto.message_field(12) - # Use specified application protocol for health checks. 
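# Sketch of an HTTP health check built from the HealthCheck messages above.
# Duration fields take datetime.timedelta and wrapped uint32 fields take plain
# ints. Module path and the example host/path are assumptions.
from datetime import timedelta

import envoy_data_plane.envoy.config.core.v3 as core_v3

hc = core_v3.HealthCheck(
    timeout=timedelta(seconds=5),
    interval=timedelta(seconds=10),
    unhealthy_threshold=3,
    healthy_threshold=2,
    http_health_check=core_v3.HealthCheckHttpHealthCheck(
        host="backend.example.com",
        path="/healthcheck",
        request_headers_to_add=[
            core_v3.HeaderValueOption(
                header=core_v3.HeaderValue(key="x-health-probe", value="envoy")
            )
        ],
    ),
)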
- codec_client_type: "___type_v3__.CodecClientType" = betterproto.enum_field(10) - # An optional service name parameter which is used to validate the identity - # of the health checked cluster using a :ref:`StringMatcher - # `. See the - # :ref:`architecture overview ` for - # more information. - service_name_matcher: "___type_matcher_v3__.StringMatcher" = ( - betterproto.message_field(11) - ) - - -@dataclass(eq=False, repr=False) -class HealthCheckTcpHealthCheck(betterproto.Message): - # Empty payloads imply a connect-only health check. - send: "HealthCheckPayload" = betterproto.message_field(1) - # When checking the response, “fuzzy” matching is performed such that each - # binary block must be found, and in the order specified, but not necessarily - # contiguous. - receive: List["HealthCheckPayload"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class HealthCheckRedisHealthCheck(betterproto.Message): - # If set, optionally perform ``EXISTS `` instead of ``PING``. A return - # value from Redis of 0 (does not exist) is considered a passing healthcheck. - # A return value other than 0 is considered a failure. This allows the user - # to mark a Redis instance for maintenance by setting the specified key to - # any value and waiting for traffic to drain. - key: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class HealthCheckGrpcHealthCheck(betterproto.Message): - """ - `grpc.health.v1.Health `_-based healthcheck. See `gRPC doc - `_ for - details. - """ - - # An optional service name parameter which will be sent to gRPC service in - # `grpc.health.v1.HealthCheckRequest `_. message. See `gRPC health- - # checking overview `_ for more information. - service_name: str = betterproto.string_field(1) - # The value of the :authority header in the gRPC health check request. If - # left empty (default value), the name of the cluster this health check is - # associated with will be used. The authority header can be customized for a - # specific endpoint by setting the :ref:`hostname ` field. - authority: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class HealthCheckCustomHealthCheck(betterproto.Message): - """Custom health check.""" - - # The registered name of the custom health checker. - name: str = betterproto.string_field(1) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - -@dataclass(eq=False, repr=False) -class HealthCheckTlsOptions(betterproto.Message): - """ - Health checks occur over the transport socket specified for the cluster. - This implies that if a cluster is using a TLS-enabled transport socket, the - health check will also occur over TLS. This allows overriding the cluster - TLS settings, just for health check connections. - """ - - # Specifies the ALPN protocols for health check connections. This is useful - # if the corresponding upstream is using ALPN-based :ref:`FilterChainMatch - # ` along with - # different protocols for health checks versus data connections. If empty, no - # ALPN protocols will be set on health check connections. - alpn_protocols: List[str] = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class TcpProtocolOptions(betterproto.Message): - """[#not-implemented-hide:]""" - - pass - - -@dataclass(eq=False, repr=False) -class QuicKeepAliveSettings(betterproto.Message): - """ - Config for keepalive probes in a QUIC connection. 
Note that QUIC keep-alive - probing packets work differently from HTTP/2 keep-alive PINGs in a sense - that the probing packet itself doesn't timeout waiting for a probing - response. Quic has a shorter idle timeout than TCP, so it doesn't rely on - such probing to discover dead connections. If the peer fails to respond, - the connection will idle timeout eventually. Thus, they are configured - differently from :ref:`connection_keepalive `. - """ - - # The max interval for a connection to send keep-alive probing packets (with - # PING or PATH_RESPONSE). The value should be smaller than :ref:`connection - # idle_timeout - # ` - # to prevent idle timeout while not less than 1s to avoid throttling the - # connection or flooding the peer with probes. If :ref:`initial_interval - # ` - # is absent or zero, a client connection will use this value to start - # probing. If zero, disable keepalive probing. If absent, use the QUICHE - # default interval to probe. - max_interval: timedelta = betterproto.message_field(1) - # The interval to send the first few keep-alive probing packets to prevent - # connection from hitting the idle timeout. Subsequent probes will be sent, - # each one with an interval exponentially longer than previous one, till it - # reaches :ref:`max_interval - # `. - # And the probes afterwards will always use :ref:`max_interval - # `. - # The value should be smaller than :ref:`connection idle_timeout - # ` - # to prevent idle timeout and smaller than max_interval to take effect. If - # absent or zero, disable keepalive probing for a server connection. For a - # client connection, if :ref:`max_interval - # ` is - # also zero, do not keepalive, otherwise use max_interval or QUICHE default - # to probe all the time. - initial_interval: timedelta = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class QuicProtocolOptions(betterproto.Message): - """ - QUIC protocol options which apply to both downstream and upstream - connections. [#next-free-field: 6] - """ - - # Maximum number of streams that the client can negotiate per connection. 100 - # if not specified. - max_concurrent_streams: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # `Initial stream-level flow-control receive window - # `_ - # size. Valid values range from 1 to 16777216 (2^24, maximum supported by - # QUICHE) and defaults to 65536 (2^16). NOTE: 16384 (2^14) is the minimum - # window size supported in Google QUIC. If configured smaller than it, we - # will use 16384 instead. QUICHE IETF Quic implementation supports 1 bytes - # window. We only support increasing the default window size now, so it's - # also the minimum. This field also acts as a soft limit on the number of - # bytes Envoy will buffer per-stream in the QUIC stream send and receive - # buffers. Once the buffer reaches this pointer, watermark callbacks will - # fire to stop the flow of data to the stream buffers. - initial_stream_window_size: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # Similar to *initial_stream_window_size*, but for connection-level flow- - # control. Valid values rage from 1 to 25165824 (24MB, maximum supported by - # QUICHE) and defaults to 65536 (2^16). window. Currently, this has the same - # minimum/default as *initial_stream_window_size*. NOTE: 16384 (2^14) is the - # minimum window size supported in Google QUIC. We only support increasing - # the default window size now, so it's also the minimum. 
- initial_connection_window_size: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - # The number of timeouts that can occur before port migration is triggered - # for QUIC clients. This defaults to 1. If set to 0, port migration will not - # occur on path degrading. Timeout here refers to QUIC internal path - # degrading timeout mechanism, such as PTO. This has no effect on server - # sessions. - num_timeouts_to_trigger_port_migration: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # Probes the peer at the configured interval to solicit traffic, i.e. ACK or - # PATH_RESPONSE, from the peer to push back connection idle timeout. If - # absent, use the default keepalive behavior of which a client connection - # sends PINGs every 15s, and a server connection doesn't do anything. - connection_keepalive: "QuicKeepAliveSettings" = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class UpstreamHttpProtocolOptions(betterproto.Message): - # Set transport socket `SNI - # `_ for new upstream - # connections based on the downstream HTTP host/authority header or any other - # arbitrary header when :ref:`override_auto_sni_header ` is set, - # as seen by the :ref:`router filter `. - auto_sni: bool = betterproto.bool_field(1) - # Automatic validate upstream presented certificate for new upstream - # connections based on the downstream HTTP host/authority header or any other - # arbitrary header when :ref:`override_auto_sni_header ` is set, - # as seen by the :ref:`router filter `. This - # field is intended to be set with `auto_sni` field. - auto_san_validation: bool = betterproto.bool_field(2) - # An optional alternative to the host/authority header to be used for setting - # the SNI value. It should be a valid downstream HTTP header, as seen by the - # :ref:`router filter `. If unset, host/authority - # header will be used for populating the SNI. If the specified header is not - # found or the value is empty, host/authority header will be used instead. - # This field is intended to be set with `auto_sni` and/or - # `auto_san_validation` fields. If none of these fields are set then setting - # this would be a no-op. - override_auto_sni_header: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class AlternateProtocolsCacheOptions(betterproto.Message): - """ - Configures the alternate protocols cache which tracks alternate protocols - that can be used to make an HTTP connection to an origin server. See - https://tools.ietf.org/html/rfc7838 for HTTP Alternative Services and - https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 for - the "HTTPS" DNS resource record. - """ - - # The name of the cache. Multiple named caches allow independent alternate - # protocols cache configurations to operate within a single Envoy process - # using different configurations. All alternate protocols cache options with - # the same name *must* be equal in all fields when referenced from different - # configuration components. Configuration will fail to load if this is not - # the case. - name: str = betterproto.string_field(1) - # The maximum number of entries that the cache will hold. If not specified - # defaults to 1024. .. note: The implementation is approximate and enforced - # independently on each worker thread, thus it is possible for the maximum - # entries in the cache to go slightly above the configured value depending - # on timing. This is similar to how other circuit breakers work. 
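# Sketch of the QUIC options above: stream/connection flow-control windows plus
# a keepalive schedule. The window sizes mirror the documented defaults; the
# keepalive intervals and the module path are assumptions.
from datetime import timedelta

import envoy_data_plane.envoy.config.core.v3 as core_v3

quic = core_v3.QuicProtocolOptions(
    max_concurrent_streams=100,
    initial_stream_window_size=65536,        # 2^16, the documented default
    initial_connection_window_size=65536,
    connection_keepalive=core_v3.QuicKeepAliveSettings(
        max_interval=timedelta(seconds=30),
        initial_interval=timedelta(seconds=5),
    ),
)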
- max_entries: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # Allows configuring a persistent :ref:`key value store - # ` to flush - # alternate protocols entries to disk. This function is currently only - # supported if concurrency is 1 - key_value_store_config: "TypedExtensionConfig" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class HttpProtocolOptions(betterproto.Message): - """[#next-free-field: 7]""" - - # The idle timeout for connections. The idle timeout is defined as the period - # in which there are no active requests. When the idle timeout is reached the - # connection will be closed. If the connection is an HTTP/2 downstream - # connection a drain sequence will occur prior to closing the connection, see - # :ref:`drain_timeout `. Note that request - # based timeouts mean that HTTP/2 PINGs will not keep the connection alive. - # If not specified, this defaults to 1 hour. To disable idle timeouts - # explicitly set this to 0. .. warning:: Disabling this timeout has a - # highly likelihood of yielding connection leaks due to lost TCP FIN - # packets, etc. If the :ref:`overload action - # ` - # "envoy.overload_actions.reduce_timeouts" is configured, this timeout is - # scaled for downstream connections according to the value for - # :ref:`HTTP_DOWNSTREAM_CONNECTION_IDLE `. - idle_timeout: timedelta = betterproto.message_field(1) - # The maximum duration of a connection. The duration is defined as a period - # since a connection was established. If not set, there is no max duration. - # When max_connection_duration is reached and if there are no active streams, - # the connection will be closed. If the connection is a downstream connection - # and there are any active streams, the drain sequence will kick-in, and the - # connection will be force-closed after the drain period. See - # :ref:`drain_timeout `. - max_connection_duration: timedelta = betterproto.message_field(3) - # The maximum number of headers. If unconfigured, the default maximum number - # of request headers allowed is 100. Requests that exceed this limit will - # receive a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. - max_headers_count: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # Total duration to keep alive an HTTP request/response stream. If the time - # limit is reached the stream will be reset independent of any other - # timeouts. If not specified, this value is not set. - max_stream_duration: timedelta = betterproto.message_field(4) - # Action to take when a client request with a header name containing - # underscore characters is received. If this setting is not specified, the - # value defaults to ALLOW. Note: upstream responses are not affected by this - # setting. Note: this only affects client headers. It does not affect headers - # added by Envoy filters and does not have any impact if added to cluster - # config. - headers_with_underscores_action: "HttpProtocolOptionsHeadersWithUnderscoresAction" = betterproto.enum_field( - 5 - ) - # Optional maximum requests for both upstream and downstream connections. If - # not specified, there is no limit. Setting this parameter to 1 will - # effectively disable keep alive. For HTTP/2 and HTTP/3, due to concurrent - # stream processing, the limit is approximate. 
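# Sketch of the shared HTTP protocol options above, using only fields defined
# in this message (module path assumed; values chosen to match the documented
# defaults where noted).
from datetime import timedelta

import envoy_data_plane.envoy.config.core.v3 as core_v3

common_http = core_v3.HttpProtocolOptions(
    idle_timeout=timedelta(hours=1),          # the documented default
    max_connection_duration=timedelta(minutes=30),
    max_headers_count=100,                    # the documented default limit
)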
- max_requests_per_connection: Optional[int] = betterproto.message_field( - 6, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class Http1ProtocolOptions(betterproto.Message): - """[#next-free-field: 8]""" - - # Handle HTTP requests with absolute URLs in the requests. These requests are - # generally sent by clients to forward/explicit proxies. This allows clients - # to configure envoy as their HTTP proxy. In Unix, for example, this is - # typically done by setting the *http_proxy* environment variable. - allow_absolute_url: Optional[bool] = betterproto.message_field( - 1, wraps=betterproto.TYPE_BOOL - ) - # Handle incoming HTTP/1.0 and HTTP 0.9 requests. This is off by default, and - # not fully standards compliant. There is support for pre-HTTP/1.1 style - # connect logic, dechunking, and handling lack of client host iff - # *default_host_for_http_10* is configured. - accept_http_10: bool = betterproto.bool_field(2) - # A default host for HTTP/1.0 requests. This is highly suggested if - # *accept_http_10* is true as Envoy does not otherwise support HTTP/1.0 - # without a Host header. This is a no-op if *accept_http_10* is not true. - default_host_for_http_10: str = betterproto.string_field(3) - # Describes how the keys for response headers should be formatted. By - # default, all header keys are lower cased. - header_key_format: "Http1ProtocolOptionsHeaderKeyFormat" = ( - betterproto.message_field(4) - ) - # Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied - # trailers. .. attention:: Note that this only happens when Envoy is chunk - # encoding which occurs when: - The request is HTTP/1.1. - Is neither a - # HEAD only request nor a HTTP Upgrade. - Not a response to a HEAD request. - # - The content length header is not present. - enable_trailers: bool = betterproto.bool_field(5) - # Allows Envoy to process requests/responses with both `Content-Length` and - # `Transfer-Encoding` headers set. By default such messages are rejected, but - # if option is enabled - Envoy will remove Content-Length header and process - # message. See `RFC7230, sec. 3.3.3 - # `_ for details. .. - # attention:: Enabling this option might lead to request smuggling - # vulnerability, especially if traffic is proxied via multiple layers of - # proxies. - allow_chunked_length: bool = betterproto.bool_field(6) - # Allows invalid HTTP messaging. When this option is false, then Envoy will - # terminate HTTP/1.1 connections upon receiving an invalid HTTP message. - # However, when this option is true, then Envoy will leave the HTTP/1.1 - # connection open where possible. If set, this overrides any HCM - # :ref:`stream_error_on_invalid_http_messaging `. - override_stream_error_on_invalid_http_message: Optional[ - bool - ] = betterproto.message_field(7, wraps=betterproto.TYPE_BOOL) - - -@dataclass(eq=False, repr=False) -class Http1ProtocolOptionsHeaderKeyFormat(betterproto.Message): - """[#next-free-field: 9]""" - - # Formats the header by proper casing words: the first character and any - # character following a special character will be capitalized if it's an - # alpha character. For example, "content-type" becomes "Content-Type", and - # "foo$b#$are" becomes "Foo$B#$Are". Note that while this results in most - # headers following conventional casing, certain headers are not covered. For - # example, the "TE" header will be formatted as "Te". 
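For reference, these generated protocol-options messages are plain betterproto dataclasses, so they can be built directly with keyword arguments; wrapper-typed fields (wraps=TYPE_*) take plain Python values and protobuf Durations map to datetime.timedelta. A minimal sketch, assuming the envoy_data_plane.envoy.config.core.v3 module path and purely illustrative values:

    from datetime import timedelta

    from envoy_data_plane.envoy.config.core.v3 import (
        Http1ProtocolOptions,
        HttpProtocolOptions,
    )

    # Durations are plain timedeltas; wrapped uint32 fields are Optional[int].
    http_opts = HttpProtocolOptions(
        idle_timeout=timedelta(hours=1),      # illustrative value
        max_headers_count=100,
        max_requests_per_connection=1000,
    )
    h1_opts = Http1ProtocolOptions(
        accept_http_10=True,
        default_host_for_http_10="example.com",  # hypothetical host
        enable_trailers=True,
    )

    wire = bytes(http_opts)  # betterproto wire-format serialization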
- proper_case_words: "Http1ProtocolOptionsHeaderKeyFormatProperCaseWords" = ( - betterproto.message_field(1, group="header_format") - ) - # Configuration for stateful formatter extensions that allow using received - # headers to affect the output of encoding headers. E.g., preserving case - # during proxying. [#extension-category: - # envoy.http.stateful_header_formatters] - stateful_formatter: "TypedExtensionConfig" = betterproto.message_field( - 8, group="header_format" - ) - - -@dataclass(eq=False, repr=False) -class Http1ProtocolOptionsHeaderKeyFormatProperCaseWords(betterproto.Message): - pass - - -@dataclass(eq=False, repr=False) -class KeepaliveSettings(betterproto.Message): - # Send HTTP/2 PING frames at this period, in order to test that the - # connection is still alive. If this is zero, interval PINGs will not be - # sent. - interval: timedelta = betterproto.message_field(1) - # How long to wait for a response to a keepalive PING. If a response is not - # received within this time period, the connection will be aborted. - timeout: timedelta = betterproto.message_field(2) - # A random jitter amount as a percentage of interval that will be added to - # each interval. A value of zero means there will be no jitter. The default - # value is 15%. - interval_jitter: "___type_v3__.Percent" = betterproto.message_field(3) - # If the connection has been idle for this duration, send a HTTP/2 ping ahead - # of new stream creation, to quickly detect dead connections. If this is - # zero, this type of PING will not be sent. If an interval ping is - # outstanding, a second ping will not be sent as the interval ping will - # determine if the connection is dead. The same feature for HTTP/3 is given - # by inheritance from QUICHE which uses :ref:`connection idle_timeout - # ` - # and the current PTO of the connection to decide whether to probe before - # sending a new request. - connection_idle_interval: timedelta = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class Http2ProtocolOptions(betterproto.Message): - """[#next-free-field: 16]""" - - # `Maximum table size - # `_ (in octets) that - # the encoder is permitted to use for the dynamic HPACK table. Valid values - # range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively - # disables header compression. - hpack_table_size: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # `Maximum concurrent streams - # `_ allowed for - # peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 - # (2^31 - 1) and defaults to 2147483647. For upstream connections, this also - # limits how many streams Envoy will initiate concurrently on a single - # connection. If the limit is reached, Envoy may queue requests or establish - # additional connections (as allowed per circuit breaker limits). This acts - # as an upper bound: Envoy will lower the max concurrent streams allowed on a - # given connection based on upstream settings. Config dumps will reflect the - # configured upper bound, not the per-connection negotiated limits. - max_concurrent_streams: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # `Initial stream-level flow-control window - # `_ size. Valid - # values range from 65535 (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, - # HTTP/2 maximum) and defaults to 268435456 (256 * 1024 * 1024). NOTE: 65535 - # is the initial window size from HTTP/2 spec. 
We only support increasing the - # default window size now, so it's also the minimum. This field also acts as - # a soft limit on the number of bytes Envoy will buffer per-stream in the - # HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark - # callbacks will fire to stop the flow of data to the codec buffers. - initial_stream_window_size: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - # Similar to *initial_stream_window_size*, but for connection-level flow- - # control window. Currently, this has the same minimum/maximum/default as - # *initial_stream_window_size*. - initial_connection_window_size: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # Allows proxying Websocket and other upgrades over H2 connect. - allow_connect: bool = betterproto.bool_field(5) - # [#not-implemented-hide:] Hiding until envoy has full metadata support. - # Still under implementation. DO NOT USE. Allows metadata. See [metadata docs - # ](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) - # for more information. - allow_metadata: bool = betterproto.bool_field(6) - # Limit the number of pending outbound downstream frames of all types (frames - # that are waiting to be written into the socket). Exceeding this limit - # triggers flood mitigation and connection is terminated. The - # ``http2.outbound_flood`` stat tracks the number of terminated connections - # due to flood mitigation. The default limit is 10000. - max_outbound_frames: Optional[int] = betterproto.message_field( - 7, wraps=betterproto.TYPE_UINT32 - ) - # Limit the number of pending outbound downstream frames of types PING, - # SETTINGS and RST_STREAM, preventing high memory utilization when receiving - # continuous stream of these frames. Exceeding this limit triggers flood - # mitigation and connection is terminated. The - # ``http2.outbound_control_flood`` stat tracks the number of terminated - # connections due to flood mitigation. The default limit is 1000. - max_outbound_control_frames: Optional[int] = betterproto.message_field( - 8, wraps=betterproto.TYPE_UINT32 - ) - # Limit the number of consecutive inbound frames of types HEADERS, - # CONTINUATION and DATA with an empty payload and no end stream flag. Those - # frames have no legitimate use and are abusive, but might be a result of a - # broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood`` stat - # tracks the number of connections terminated due to flood mitigation. - # Setting this to 0 will terminate connection upon receiving first frame with - # an empty payload and no end stream flag. The default limit is 1. - max_consecutive_inbound_frames_with_empty_payload: Optional[ - int - ] = betterproto.message_field(9, wraps=betterproto.TYPE_UINT32) - # Limit the number of inbound PRIORITY frames allowed per each opened stream. - # If the number of PRIORITY frames received over the lifetime of connection - # exceeds the value calculated using this formula:: - # max_inbound_priority_frames_per_stream * (1 + opened_streams) the - # connection is terminated. For downstream connections the `opened_streams` - # is incremented when Envoy receives complete response headers from the - # upstream server. For upstream connection the `opened_streams` is - # incremented when Envoy send the HEADERS frame for a new stream. The - # ``http2.inbound_priority_frames_flood`` stat tracks the number of - # connections terminated due to flood mitigation. The default limit is 100. 
- max_inbound_priority_frames_per_stream: Optional[int] = betterproto.message_field( - 10, wraps=betterproto.TYPE_UINT32 - ) - # Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame - # sent. If the number of WINDOW_UPDATE frames received over the lifetime of - # connection exceeds the value calculated using this formula:: 5 + 2 * - # (opened_streams + - # max_inbound_window_update_frames_per_data_frame_sent * - # outbound_data_frames) the connection is terminated. For downstream - # connections the `opened_streams` is incremented when Envoy receives - # complete response headers from the upstream server. For upstream - # connections the `opened_streams` is incremented when Envoy sends the - # HEADERS frame for a new stream. The ``http2.inbound_priority_frames_flood`` - # stat tracks the number of connections terminated due to flood mitigation. - # The default max_inbound_window_update_frames_per_data_frame_sent value is - # 10. Setting this to 1 should be enough to support HTTP/2 implementations - # with basic flow control, but more complex implementations that try to - # estimate available bandwidth require at least 2. - max_inbound_window_update_frames_per_data_frame_sent: Optional[ - int - ] = betterproto.message_field(11, wraps=betterproto.TYPE_UINT32) - # Allows invalid HTTP messaging and headers. When this option is disabled - # (default), then the whole HTTP/2 connection is terminated upon receiving - # invalid HEADERS frame. However, when this option is enabled, only the - # offending stream is terminated. This is overridden by HCM - # :ref:`stream_error_on_invalid_http_messaging ` iff present. This is deprecated in favor of - # :ref:`override_stream_error_on_invalid_http_message ` See `RFC7540, sec. 8.1 - # `_ for details. - stream_error_on_invalid_http_messaging: bool = betterproto.bool_field(12) - # Allows invalid HTTP messaging and headers. When this option is disabled - # (default), then the whole HTTP/2 connection is terminated upon receiving - # invalid HEADERS frame. However, when this option is enabled, only the - # offending stream is terminated. This overrides any HCM - # :ref:`stream_error_on_invalid_http_messaging ` See `RFC7540, sec. 8.1 - # `_ for details. - override_stream_error_on_invalid_http_message: Optional[ - bool - ] = betterproto.message_field(14, wraps=betterproto.TYPE_BOOL) - # [#not-implemented-hide:] Specifies SETTINGS frame parameters to be sent to - # the peer, with two exceptions: 1. SETTINGS_ENABLE_PUSH (0x2) is not - # configurable as HTTP/2 server push is not supported by Envoy. 2. - # SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the - # named field 'allow_connect'. Note that custom parameters specified through - # this field can not also be set in the corresponding named parameters: .. - # code-block:: text ID Field Name ---------------- 0x1 - # hpack_table_size 0x3 max_concurrent_streams 0x4 - # initial_stream_window_size Collisions will trigger config validation - # failure on load/update. Likewise, inconsistencies between custom parameters - # with the same identifier will trigger a failure. See `IANA HTTP/2 Settings - # `_ for standardized identifiers. - custom_settings_parameters: List[ - "Http2ProtocolOptionsSettingsParameter" - ] = betterproto.message_field(13) - # Send HTTP/2 PING frames to verify that the connection is still healthy. If - # the remote peer does not respond within the configured timeout, the - # connection will be aborted. 
- connection_keepalive: "KeepaliveSettings" = betterproto.message_field(15) - - def __post_init__(self) -> None: - super().__post_init__() - if self.stream_error_on_invalid_http_messaging: - warnings.warn( - "Http2ProtocolOptions.stream_error_on_invalid_http_messaging is deprecated", - DeprecationWarning, - ) - - -@dataclass(eq=False, repr=False) -class Http2ProtocolOptionsSettingsParameter(betterproto.Message): - """ - Defines a parameter to be sent in the SETTINGS frame. See `RFC7540, sec. - 6.5.1 `_ for details. - """ - - # The 16 bit parameter identifier. - identifier: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # The 32 bit parameter value. - value: Optional[int] = betterproto.message_field(2, wraps=betterproto.TYPE_UINT32) - - -@dataclass(eq=False, repr=False) -class GrpcProtocolOptions(betterproto.Message): - """[#not-implemented-hide:]""" - - http2_protocol_options: "Http2ProtocolOptions" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class Http3ProtocolOptions(betterproto.Message): - """A message which allows using HTTP/3. [#next-free-field: 6]""" - - quic_protocol_options: "QuicProtocolOptions" = betterproto.message_field(1) - # Allows invalid HTTP messaging and headers. When this option is disabled - # (default), then the whole HTTP/3 connection is terminated upon receiving - # invalid HEADERS frame. However, when this option is enabled, only the - # offending stream is terminated. If set, this overrides any HCM - # :ref:`stream_error_on_invalid_http_messaging `. - override_stream_error_on_invalid_http_message: Optional[ - bool - ] = betterproto.message_field(2, wraps=betterproto.TYPE_BOOL) - # Allows proxying Websocket and other upgrades over HTTP/3 CONNECT using the - # header mechanisms from the `HTTP/2 extended connect RFC - # `_ and settings `proposed - # for HTTP/3 `_ Note that HTTP/3 CONNECT is not yet an RFC. - allow_extended_connect: bool = betterproto.bool_field(5) - - -@dataclass(eq=False, repr=False) -class SchemeHeaderTransformation(betterproto.Message): - """A message to control transformations to the :scheme header""" - - # Overwrite any Scheme header with the contents of this string. - scheme_to_overwrite: str = betterproto.string_field(1, group="transformation") - - -@dataclass(eq=False, repr=False) -class DnsResolverOptions(betterproto.Message): - """ - Configuration of DNS resolver option flags which control the behavior of - the DNS resolver. - """ - - # Use TCP for all DNS queries instead of the default protocol UDP. - use_tcp_for_dns_lookups: bool = betterproto.bool_field(1) - # Do not use the default search domains; only query hostnames as-is or as - # aliases. - no_default_search_domain: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class DnsResolutionConfig(betterproto.Message): - """ - DNS resolution configuration which includes the underlying dns resolver - addresses and options. - """ - - # A list of dns resolver addresses. If specified, the DNS client library will - # perform resolution via the underlying DNS resolvers. Otherwise, the default - # system resolvers (e.g., /etc/resolv.conf) will be used. - resolvers: List["Address"] = betterproto.message_field(1) - # Configuration of DNS resolver option flags which control the behavior of - # the DNS resolver. 
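A similar sketch for the HTTP/2 options and keepalive settings above, assuming the same core.v3 module path and an envoy_data_plane.envoy.type.v3.Percent helper; the interval, timeout, and jitter values are illustrative:

    from datetime import timedelta

    from envoy_data_plane.envoy.config.core.v3 import (
        Http2ProtocolOptions,
        KeepaliveSettings,
    )
    from envoy_data_plane.envoy.type.v3 import Percent

    h2_opts = Http2ProtocolOptions(
        max_concurrent_streams=128,
        initial_stream_window_size=65535,  # HTTP/2 spec default, also the minimum here
        connection_keepalive=KeepaliveSettings(
            interval=timedelta(seconds=30),
            timeout=timedelta(seconds=5),
            interval_jitter=Percent(value=15.0),
        ),
    )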
- dns_resolver_options: "DnsResolverOptions" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class UdpSocketConfig(betterproto.Message): - """Generic UDP socket configuration.""" - - # The maximum size of received UDP datagrams. Using a larger size will cause - # Envoy to allocate more memory per socket. Received datagrams above this - # size will be dropped. If not set defaults to 1500 bytes. - max_rx_datagram_size: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT64 - ) - # Configures whether Generic Receive Offload (GRO) - # _ is preferred when - # reading from the UDP socket. The default is context dependent and is - # documented where UdpSocketConfig is used. This option affects performance - # but not functionality. If GRO is not supported by the operating system, - # non-GRO receive will be used. - prefer_gro: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) - - -@dataclass(eq=False, repr=False) -class SubstitutionFormatString(betterproto.Message): - """ - Configuration to use multiple :ref:`command operators - ` to generate a new string in either - plain text or JSON format. [#next-free-field: 7] - """ - - # Specify a format with command operators to form a text string. Its details - # is described in :ref:`format string`. For - # example, setting ``text_format`` like below, .. validated-code-block:: yaml - # :type-name: envoy.config.core.v3.SubstitutionFormatString text_format: - # "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" generates plain - # text similar to: .. code-block:: text upstream connect - # error:503:path=/foo Deprecated in favor of :ref:`text_format_source `. - # To migrate text format strings, use the :ref:`inline_string - # ` field. - text_format: str = betterproto.string_field(1, group="format") - # Specify a format with command operators to form a JSON string. Its details - # is described in :ref:`format - # dictionary`. Values are rendered as - # strings, numbers, or boolean values as appropriate. Nested JSON objects may - # be produced by some command operators (e.g. FILTER_STATE or - # DYNAMIC_METADATA). See the documentation for a specific command operator - # for details. .. validated-code-block:: yaml :type-name: - # envoy.config.core.v3.SubstitutionFormatString json_format: status: - # "%RESPONSE_CODE%" message: "%LOCAL_REPLY_BODY%" The following JSON - # object would be created: .. code-block:: json { "status": 500, - # "message": "My error message" } - json_format: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="format" - ) - # Specify a format with command operators to form a text string. Its details - # is described in :ref:`format string`. For - # example, setting ``text_format`` like below, .. validated-code-block:: yaml - # :type-name: envoy.config.core.v3.SubstitutionFormatString - # text_format_source: inline_string: - # "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" generates plain - # text similar to: .. code-block:: text upstream connect - # error:503:path=/foo - text_format_source: "DataSource" = betterproto.message_field(5, group="format") - # If set to true, when command operators are evaluated to null, * for - # ``text_format``, the output of the empty operator is changed from ``-`` to - # an empty string, so that empty values are omitted entirely. * for - # ``json_format`` the keys with null values are omitted in the output - # structure. 
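As a rough sketch of the substitution format message above, assuming DataSource in the same core.v3 module exposes an inline_string field (the format string is the one quoted in the field comment):

    from envoy_data_plane.envoy.config.core.v3 import (
        DataSource,
        SubstitutionFormatString,
    )

    # Prefer text_format_source over the deprecated text_format field.
    fmt = SubstitutionFormatString(
        text_format_source=DataSource(
            inline_string="%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n",
        ),
    )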
- omit_empty_values: bool = betterproto.bool_field(3) - # Specify a *content_type* field. If this field is not set then - # ``text/plain`` is used for *text_format* and ``application/json`` is used - # for *json_format*. .. validated-code-block:: yaml :type-name: - # envoy.config.core.v3.SubstitutionFormatString content_type: "text/html; - # charset=UTF-8" - content_type: str = betterproto.string_field(4) - # Specifies a collection of Formatter plugins that can be called from the - # access log configuration. See the formatters extensions documentation for - # details. [#extension-category: envoy.formatter] - formatters: List["TypedExtensionConfig"] = betterproto.message_field(6) - - def __post_init__(self) -> None: - super().__post_init__() - if self.text_format: - warnings.warn( - "SubstitutionFormatString.text_format is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class GrpcMethodList(betterproto.Message): - """ - A list of gRPC methods which can be used as an allowlist, for example. - """ - - services: List["GrpcMethodListService"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class GrpcMethodListService(betterproto.Message): - # The name of the gRPC service. - name: str = betterproto.string_field(1) - # The names of the gRPC methods in this service. - method_names: List[str] = betterproto.string_field(2) - - -from .....xds.core import v3 as ____xds_core_v3__ -from ....type import v3 as ___type_v3__ -from ....type.matcher import v3 as ___type_matcher_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/endpoint/__init__.py b/src/envoy_data_plane/envoy/config/endpoint/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/endpoint/v3/__init__.py b/src/envoy_data_plane/envoy/config/endpoint/v3/__init__.py deleted file mode 100644 index 62847ac..0000000 --- a/src/envoy_data_plane/envoy/config/endpoint/v3/__init__.py +++ /dev/null @@ -1,361 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/endpoint/v3/endpoint.proto, envoy/config/endpoint/v3/endpoint_components.proto, envoy/config/endpoint/v3/load_report.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import Dict, List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Endpoint(betterproto.Message): - """Upstream host identifier.""" - - # The upstream host address. .. attention:: The form of host address - # depends on the given cluster type. For STATIC or EDS, it is expected to - # be a direct IP address (or something resolvable by the specified - # :ref:`resolver - # ` in the - # Address). For LOGICAL or STRICT DNS, it is expected to be hostname, and - # will be resolved via DNS. - address: "__core_v3__.Address" = betterproto.message_field(1) - # The optional health check configuration is used as configuration for the - # health checker to contact the health checked host. .. attention:: This - # takes into effect only for upstream clusters with :ref:`active health - # checking ` enabled. - health_check_config: "EndpointHealthCheckConfig" = betterproto.message_field(2) - # The hostname associated with this endpoint. This hostname is not used for - # routing or address resolution. 
If provided, it will be associated with the - # endpoint, and can be used for features that require a hostname, like - # :ref:`auto_host_rewrite - # `. - hostname: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class EndpointHealthCheckConfig(betterproto.Message): - """The optional health check configuration.""" - - # Optional alternative health check port value. By default the health check - # address port of an upstream host is the same as the host's serving address - # port. This provides an alternative health check port. Setting this with a - # non-zero value allows an upstream host to have different health check - # address port. - port_value: int = betterproto.uint32_field(1) - # By default, the host header for L7 health checks is controlled by cluster - # level configuration (see: :ref:`host - # ` and - # :ref:`authority `). Setting this to a non-empty value allows overriding the - # cluster level configuration for a specific endpoint. - hostname: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class LbEndpoint(betterproto.Message): - """An Endpoint that Envoy can route traffic to. [#next-free-field: 6]""" - - endpoint: "Endpoint" = betterproto.message_field(1, group="host_identifier") - # [#not-implemented-hide:] - endpoint_name: str = betterproto.string_field(5, group="host_identifier") - # Optional health status when known and supplied by EDS server. - health_status: "__core_v3__.HealthStatus" = betterproto.enum_field(2) - # The endpoint metadata specifies values that may be used by the load - # balancer to select endpoints in a cluster for a given request. The filter - # name should be specified as *envoy.lb*. An example boolean key-value pair - # is *canary*, providing the optional canary status of the upstream host. - # This may be matched against in a route's :ref:`RouteAction - # ` metadata_match field to - # subset the endpoints considered in cluster load balancing. - metadata: "__core_v3__.Metadata" = betterproto.message_field(3) - # The optional load balancing weight of the upstream host; at least 1. Envoy - # uses the load balancing weight in some of the built in load balancers. The - # load balancing weight for an endpoint is divided by the sum of the weights - # of all endpoints in the endpoint's locality to produce a percentage of - # traffic for the endpoint. This percentage is then further weighted by the - # endpoint's locality's load balancing weight from LocalityLbEndpoints. If - # unspecified, each host is presumed to have equal weight in a locality. The - # sum of the weights of all endpoints in the endpoint's locality must not - # exceed uint32_t maximal value (4294967295). - load_balancing_weight: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class LedsClusterLocalityConfig(betterproto.Message): - """[#not-implemented-hide:] A configuration for a LEDS collection.""" - - # Configuration for the source of LEDS updates for a Locality. - leds_config: "__core_v3__.ConfigSource" = betterproto.message_field(1) - # The xDS transport protocol glob collection resource name. The service is - # only supported in delta xDS (incremental) mode. - leds_collection_name: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class LocalityLbEndpoints(betterproto.Message): - """ - A group of endpoints belonging to a Locality. 
One can have multiple - LocalityLbEndpoints for a locality, but this is generally only done if the - different groups need to have different load balancing weights or different - priorities. [#next-free-field: 9] - """ - - # Identifies location of where the upstream hosts run. - locality: "__core_v3__.Locality" = betterproto.message_field(1) - # The group of endpoints belonging to the locality specified. - # [#comment:TODO(adisuissa): Once LEDS is implemented this field needs to be - # deprecated and replaced by *load_balancer_endpoints*.] - lb_endpoints: List["LbEndpoint"] = betterproto.message_field(2) - # The group of endpoints belonging to the locality. - # [#comment:TODO(adisuissa): Once LEDS is implemented the *lb_endpoints* - # field needs to be deprecated.] - load_balancer_endpoints: "LocalityLbEndpointsLbEndpointList" = ( - betterproto.message_field(7, group="lb_config") - ) - # LEDS Configuration for the current locality. - leds_cluster_locality_config: "LedsClusterLocalityConfig" = ( - betterproto.message_field(8, group="lb_config") - ) - # Optional: Per priority/region/zone/sub_zone weight; at least 1. The load - # balancing weight for a locality is divided by the sum of the weights of all - # localities at the same priority level to produce the effective percentage - # of traffic for the locality. The sum of the weights of all localities at - # the same priority level must not exceed uint32_t maximal value - # (4294967295). Locality weights are only considered when :ref:`locality - # weighted load balancing - # ` is configured. These - # weights are ignored otherwise. If no weights are specified when locality - # weighted load balancing is enabled, the locality is assigned no load. - load_balancing_weight: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - # Optional: the priority for this LocalityLbEndpoints. If unspecified this - # will default to the highest priority (0). Under usual circumstances, Envoy - # will only select endpoints for the highest priority (0). In the event all - # endpoints for a particular priority are unavailable/unhealthy, Envoy will - # fail over to selecting endpoints for the next highest priority group. - # Priorities should range from 0 (highest) to N (lowest) without skipping. - priority: int = betterproto.uint32_field(5) - # Optional: Per locality proximity value which indicates how close this - # locality is from the source locality. This value only provides ordering - # information (lower the value, closer it is to the source locality). This - # will be consumed by load balancing schemes that need proximity order to - # determine where to route the requests. [#not-implemented-hide:] - proximity: Optional[int] = betterproto.message_field( - 6, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class LocalityLbEndpointsLbEndpointList(betterproto.Message): - """[#not-implemented-hide:] A list of endpoints of a specific locality.""" - - lb_endpoints: List["LbEndpoint"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ClusterLoadAssignment(betterproto.Message): - """ - Each route from RDS will map to a single cluster or traffic split across - clusters using weights expressed in the RDS WeightedCluster. With EDS, each - cluster is treated independently from a LB perspective, with LB taking - place between the Localities within a cluster and at a finer granularity - between the hosts within a locality. 
The percentage of traffic for each - endpoint is determined by both its load_balancing_weight, and the - load_balancing_weight of its locality. First, a locality will be selected, - then an endpoint within that locality will be chose based on its weight. - [#next-free-field: 6] - """ - - # Name of the cluster. This will be the :ref:`service_name ` value if - # specified in the cluster :ref:`EdsClusterConfig - # `. - cluster_name: str = betterproto.string_field(1) - # List of endpoints to load balance to. - endpoints: List["LocalityLbEndpoints"] = betterproto.message_field(2) - # Map of named endpoints that can be referenced in LocalityLbEndpoints. - # [#not-implemented-hide:] - named_endpoints: Dict[str, "Endpoint"] = betterproto.map_field( - 5, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - # Load balancing policy settings. - policy: "ClusterLoadAssignmentPolicy" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class ClusterLoadAssignmentPolicy(betterproto.Message): - """Load balancing policy settings. [#next-free-field: 6]""" - - # Action to trim the overall incoming traffic to protect the upstream hosts. - # This action allows protection in case the hosts are unable to recover from - # an outage, or unable to autoscale or unable to handle incoming traffic - # volume for any reason. At the client each category is applied one after the - # other to generate the 'actual' drop percentage on all outgoing traffic. For - # example: .. code-block:: json { "drop_overloads": [ { "category": - # "throttle", "drop_percentage": 60 } { "category": "lb", - # "drop_percentage": 50 } ]} The actual drop percentages applied to the - # traffic at the clients will be "throttle"_drop = 60% "lb"_drop = 20% - # // 50% of the remaining 'actual' load, which is 40%. - # actual_outgoing_load = 20% // remaining after applying all categories. - # [#not-implemented-hide:] - drop_overloads: List[ - "ClusterLoadAssignmentPolicyDropOverload" - ] = betterproto.message_field(2) - # Priority levels and localities are considered overprovisioned with this - # factor (in percentage). This means that we don't consider a priority level - # or locality unhealthy until the fraction of healthy hosts multiplied by the - # overprovisioning factor drops below 100. With the default value 140(1.4), - # Envoy doesn't consider a priority level or a locality unhealthy until their - # percentage of healthy hosts drops below 72%. For example: .. code-block:: - # json { "overprovisioning_factor": 100 } Read more at :ref:`priority levels - # ` and :ref:`localities - # `. - overprovisioning_factor: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - # The max time until which the endpoints from this assignment can be used. If - # no new assignments are received before this time expires the endpoints are - # considered stale and should be marked unhealthy. Defaults to 0 which means - # endpoints never go stale. - endpoint_stale_after: timedelta = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class ClusterLoadAssignmentPolicyDropOverload(betterproto.Message): - """[#not-implemented-hide:]""" - - # Identifier for the policy specifying the drop. - category: str = betterproto.string_field(1) - # Percentage of traffic that should be dropped for the category. 
- drop_percentage: "___type_v3__.FractionalPercent" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class UpstreamLocalityStats(betterproto.Message): - """ - These are stats Envoy reports to the management server at a frequency - defined by :ref:`LoadStatsResponse.load_reporting_interval`. Stats - per upstream region/zone and optionally per subzone. [#next-free-field: 9] - """ - - # Name of zone, region and optionally endpoint group these metrics were - # collected from. Zone and region names could be empty if unknown. - locality: "__core_v3__.Locality" = betterproto.message_field(1) - # The total number of requests successfully completed by the endpoints in the - # locality. - total_successful_requests: int = betterproto.uint64_field(2) - # The total number of unfinished requests - total_requests_in_progress: int = betterproto.uint64_field(3) - # The total number of requests that failed due to errors at the endpoint, - # aggregated over all endpoints in the locality. - total_error_requests: int = betterproto.uint64_field(4) - # The total number of requests that were issued by this Envoy since the last - # report. This information is aggregated over all the upstream endpoints in - # the locality. - total_issued_requests: int = betterproto.uint64_field(8) - # Stats for multi-dimensional load balancing. - load_metric_stats: List["EndpointLoadMetricStats"] = betterproto.message_field(5) - # Endpoint granularity stats information for this locality. This information - # is populated if the Server requests it by setting :ref:`LoadStatsResponse.r - # eport_endpoint_granularity`. - upstream_endpoint_stats: List["UpstreamEndpointStats"] = betterproto.message_field( - 7 - ) - # [#not-implemented-hide:] The priority of the endpoint group these metrics - # were collected from. - priority: int = betterproto.uint32_field(6) - - -@dataclass(eq=False, repr=False) -class UpstreamEndpointStats(betterproto.Message): - """[#next-free-field: 8]""" - - # Upstream host address. - address: "__core_v3__.Address" = betterproto.message_field(1) - # Opaque and implementation dependent metadata of the endpoint. Envoy will - # pass this directly to the management server. - metadata: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(6) - # The total number of requests successfully completed by the endpoints in the - # locality. These include non-5xx responses for HTTP, where errors originate - # at the client and the endpoint responded successfully. For gRPC, the grpc- - # status values are those not covered by total_error_requests below. - total_successful_requests: int = betterproto.uint64_field(2) - # The total number of unfinished requests for this endpoint. - total_requests_in_progress: int = betterproto.uint64_field(3) - # The total number of requests that failed due to errors at the endpoint. For - # HTTP these are responses with 5xx status codes and for gRPC the grpc-status - # values: - DeadlineExceeded - Unimplemented - Internal - Unavailable - # - Unknown - DataLoss - total_error_requests: int = betterproto.uint64_field(4) - # The total number of requests that were issued to this endpoint since the - # last report. A single TCP connection, HTTP or gRPC request or stream is - # counted as one request. - total_issued_requests: int = betterproto.uint64_field(7) - # Stats for multi-dimensional load balancing. 
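For the endpoint messages above, a minimal, illustrative ClusterLoadAssignment could be assembled as follows; the Address/SocketAddress shape from core.v3 and all names and values are assumptions:

    from envoy_data_plane.envoy.config.core.v3 import Address, SocketAddress
    from envoy_data_plane.envoy.config.endpoint.v3 import (
        ClusterLoadAssignment,
        Endpoint,
        LbEndpoint,
        LocalityLbEndpoints,
    )

    cla = ClusterLoadAssignment(
        cluster_name="service_backend",  # hypothetical cluster name
        endpoints=[
            LocalityLbEndpoints(
                lb_endpoints=[
                    LbEndpoint(
                        endpoint=Endpoint(
                            address=Address(
                                socket_address=SocketAddress(
                                    address="10.0.0.1", port_value=8080
                                )
                            )
                        ),
                        load_balancing_weight=1,
                    )
                ],
                priority=0,  # highest priority group
            )
        ],
    )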
- load_metric_stats: List["EndpointLoadMetricStats"] = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class EndpointLoadMetricStats(betterproto.Message): - # Name of the metric; may be empty. - metric_name: str = betterproto.string_field(1) - # Number of calls that finished and included this metric. - num_requests_finished_with_metric: int = betterproto.uint64_field(2) - # Sum of metric values across all calls that finished with this metric for - # load_reporting_interval. - total_metric_value: float = betterproto.double_field(3) - - -@dataclass(eq=False, repr=False) -class ClusterStats(betterproto.Message): - """ - Per cluster load stats. Envoy reports these stats a management server in a - :ref:`LoadStatsRequest` Next ID: 7 [#next-free-field: 7] - """ - - # The name of the cluster. - cluster_name: str = betterproto.string_field(1) - # The eds_cluster_config service_name of the cluster. It's possible that two - # clusters send the same service_name to EDS, in that case, the management - # server is supposed to do aggregation on the load reports. - cluster_service_name: str = betterproto.string_field(6) - # Need at least one. - upstream_locality_stats: List["UpstreamLocalityStats"] = betterproto.message_field( - 2 - ) - # Cluster-level stats such as total_successful_requests may be computed by - # summing upstream_locality_stats. In addition, below there are additional - # cluster-wide stats. The total number of dropped requests. This covers - # requests deliberately dropped by the drop_overload policy and circuit - # breaking. - total_dropped_requests: int = betterproto.uint64_field(3) - # Information about deliberately dropped requests for each category specified - # in the DropOverload policy. - dropped_requests: List["ClusterStatsDroppedRequests"] = betterproto.message_field(5) - # Period over which the actual load report occurred. This will be guaranteed - # to include every request reported. Due to system load and delays between - # the *LoadStatsRequest* sent from Envoy and the *LoadStatsResponse* message - # sent from the management server, this may be longer than the requested load - # reporting interval in the *LoadStatsResponse*. - load_report_interval: timedelta = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class ClusterStatsDroppedRequests(betterproto.Message): - # Identifier for the policy specifying the drop. - category: str = betterproto.string_field(1) - # Total number of deliberately dropped requests for the category. - dropped_count: int = betterproto.uint64_field(2) - - -from ....type import v3 as ___type_v3__ -from ...core import v3 as __core_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/filter/__init__.py b/src/envoy_data_plane/envoy/config/filter/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/accesslog/__init__.py b/src/envoy_data_plane/envoy/config/filter/accesslog/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/accesslog/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/accesslog/v2/__init__.py deleted file mode 100644 index f4836e2..0000000 --- a/src/envoy_data_plane/envoy/config/filter/accesslog/v2/__init__.py +++ /dev/null @@ -1,263 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/config/filter/accesslog/v2/accesslog.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class ComparisonFilterOp(betterproto.Enum): - EQ = 0 - GE = 1 - LE = 2 - - -class GrpcStatusFilterStatus(betterproto.Enum): - OK = 0 - CANCELED = 1 - UNKNOWN = 2 - INVALID_ARGUMENT = 3 - DEADLINE_EXCEEDED = 4 - NOT_FOUND = 5 - ALREADY_EXISTS = 6 - PERMISSION_DENIED = 7 - RESOURCE_EXHAUSTED = 8 - FAILED_PRECONDITION = 9 - ABORTED = 10 - OUT_OF_RANGE = 11 - UNIMPLEMENTED = 12 - INTERNAL = 13 - UNAVAILABLE = 14 - DATA_LOSS = 15 - UNAUTHENTICATED = 16 - - -@dataclass(eq=False, repr=False) -class AccessLog(betterproto.Message): - # The name of the access log implementation to instantiate. The name must - # match a statically registered access log. Current built-in loggers include: - # #. "envoy.access_loggers.file" #. "envoy.access_loggers.http_grpc" #. - # "envoy.access_loggers.tcp_grpc" - name: str = betterproto.string_field(1) - # Filter which is used to determine if the access log needs to be written. - filter: "AccessLogFilter" = betterproto.message_field(2) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 3, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 4, group="config_type" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn("AccessLog.config is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class AccessLogFilter(betterproto.Message): - """[#next-free-field: 12]""" - - # Status code filter. - status_code_filter: "StatusCodeFilter" = betterproto.message_field( - 1, group="filter_specifier" - ) - # Duration filter. - duration_filter: "DurationFilter" = betterproto.message_field( - 2, group="filter_specifier" - ) - # Not health check filter. - not_health_check_filter: "NotHealthCheckFilter" = betterproto.message_field( - 3, group="filter_specifier" - ) - # Traceable filter. - traceable_filter: "TraceableFilter" = betterproto.message_field( - 4, group="filter_specifier" - ) - # Runtime filter. - runtime_filter: "RuntimeFilter" = betterproto.message_field( - 5, group="filter_specifier" - ) - # And filter. - and_filter: "AndFilter" = betterproto.message_field(6, group="filter_specifier") - # Or filter. - or_filter: "OrFilter" = betterproto.message_field(7, group="filter_specifier") - # Header filter. - header_filter: "HeaderFilter" = betterproto.message_field( - 8, group="filter_specifier" - ) - # Response flag filter. - response_flag_filter: "ResponseFlagFilter" = betterproto.message_field( - 9, group="filter_specifier" - ) - # gRPC status filter. - grpc_status_filter: "GrpcStatusFilter" = betterproto.message_field( - 10, group="filter_specifier" - ) - # Extension filter. - extension_filter: "ExtensionFilter" = betterproto.message_field( - 11, group="filter_specifier" - ) - - -@dataclass(eq=False, repr=False) -class ComparisonFilter(betterproto.Message): - """Filter on an integer comparison.""" - - # Comparison operator. - op: "ComparisonFilterOp" = betterproto.enum_field(1) - # Value to compare against. - value: "____api_v2_core__.RuntimeUInt32" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class StatusCodeFilter(betterproto.Message): - """Filters on HTTP response/status code.""" - - # Comparison. 
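A sketch of wiring the v2 access-log filter messages above together, assuming RuntimeUInt32 in envoy_data_plane.envoy.api.v2.core takes default_value and runtime_key; the logger name comes from the AccessLog comment, while the threshold and runtime key are illustrative:

    from envoy_data_plane.envoy.api.v2.core import RuntimeUInt32
    from envoy_data_plane.envoy.config.filter.accesslog.v2 import (
        AccessLog,
        AccessLogFilter,
        ComparisonFilter,
        ComparisonFilterOp,
        StatusCodeFilter,
    )

    # Log only responses with status code >= 400.
    error_log = AccessLog(
        name="envoy.access_loggers.file",
        filter=AccessLogFilter(
            status_code_filter=StatusCodeFilter(
                comparison=ComparisonFilter(
                    op=ComparisonFilterOp.GE,
                    value=RuntimeUInt32(
                        default_value=400,
                        runtime_key="access_log.min_status",  # hypothetical key
                    ),
                )
            )
        ),
    )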
- comparison: "ComparisonFilter" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class DurationFilter(betterproto.Message): - """Filters on total request duration in milliseconds.""" - - # Comparison. - comparison: "ComparisonFilter" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class NotHealthCheckFilter(betterproto.Message): - """ - Filters for requests that are not health check requests. A health check - request is marked by the health check filter. - """ - - pass - - -@dataclass(eq=False, repr=False) -class TraceableFilter(betterproto.Message): - """ - Filters for requests that are traceable. See the tracing overview for more - information on how a request becomes traceable. - """ - - pass - - -@dataclass(eq=False, repr=False) -class RuntimeFilter(betterproto.Message): - """Filters for random sampling of requests.""" - - # Runtime key to get an optional overridden numerator for use in the - # *percent_sampled* field. If found in runtime, this value will replace the - # default numerator. - runtime_key: str = betterproto.string_field(1) - # The default sampling percentage. If not specified, defaults to 0% with - # denominator of 100. - percent_sampled: "____type__.FractionalPercent" = betterproto.message_field(2) - # By default, sampling pivots on the header :ref:`x-request- - # id` being present. If - # :ref:`x-request-id` is present, - # the filter will consistently sample across multiple hosts based on the - # runtime key value and the value extracted from :ref:`x-request- - # id`. If it is missing, or - # *use_independent_randomness* is set to true, the filter will randomly - # sample based on the runtime key value alone. *use_independent_randomness* - # can be used for logging kill switches within complex nested :ref:`AndFilter - # ` and :ref:`OrFilter - # ` blocks that are easier - # to reason about from a probability perspective (i.e., setting to true will - # cause the filter to behave like an independent random variable when - # composed within logical operator filters). - use_independent_randomness: bool = betterproto.bool_field(3) - - -@dataclass(eq=False, repr=False) -class AndFilter(betterproto.Message): - """ - Performs a logical “and” operation on the result of each filter in filters. - Filters are evaluated sequentially and if one of them returns false, the - filter returns false immediately. - """ - - filters: List["AccessLogFilter"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class OrFilter(betterproto.Message): - """ - Performs a logical “or” operation on the result of each individual filter. - Filters are evaluated sequentially and if one of them returns true, the - filter returns true immediately. - """ - - filters: List["AccessLogFilter"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class HeaderFilter(betterproto.Message): - """Filters requests based on the presence or value of a request header.""" - - # Only requests with a header which matches the specified HeaderMatcher will - # pass the filter check. - header: "____api_v2_route__.HeaderMatcher" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ResponseFlagFilter(betterproto.Message): - """ - Filters requests that received responses with an Envoy response flag set. A - list of the response flags can be found in the access log formatter - :ref:`documentation`. - """ - - # Only responses with the any of the flags listed in this field will be - # logged. This field is optional. 
If it is not specified, then any response - # flag will pass the filter check. - flags: List[str] = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class GrpcStatusFilter(betterproto.Message): - """ - Filters gRPC requests based on their response status. If a gRPC status is - not provided, the filter will infer the status from the HTTP status code. - """ - - # Logs only responses that have any one of the gRPC statuses in this field. - statuses: List["GrpcStatusFilterStatus"] = betterproto.enum_field(1) - # If included and set to true, the filter will instead block all responses - # with a gRPC status or inferred gRPC status enumerated in statuses, and - # allow all other responses. - exclude: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class ExtensionFilter(betterproto.Message): - """Extension filter is statically registered at runtime.""" - - # The name of the filter implementation to instantiate. The name must match a - # statically registered filter. - name: str = betterproto.string_field(1) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn("ExtensionFilter.config is deprecated", DeprecationWarning) - - -from ..... import type as ____type__ -from .....api.v2 import core as ____api_v2_core__ -from .....api.v2 import route as ____api_v2_route__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/filter/dubbo/__init__.py b/src/envoy_data_plane/envoy/config/filter/dubbo/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/dubbo/router/__init__.py b/src/envoy_data_plane/envoy/config/filter/dubbo/router/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/dubbo/router/v2alpha1/__init__.py b/src/envoy_data_plane/envoy/config/filter/dubbo/router/v2alpha1/__init__.py deleted file mode 100644 index 4a02733..0000000 --- a/src/envoy_data_plane/envoy/config/filter/dubbo/router/v2alpha1/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/dubbo/router/v2alpha1/router.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Router(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/config/filter/fault/__init__.py b/src/envoy_data_plane/envoy/config/filter/fault/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/fault/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/fault/v2/__init__.py deleted file mode 100644 index bc2ce03..0000000 --- a/src/envoy_data_plane/envoy/config/filter/fault/v2/__init__.py +++ /dev/null @@ -1,94 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/config/filter/fault/v2/fault.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class FaultDelayFaultDelayType(betterproto.Enum): - FIXED = 0 - - -@dataclass(eq=False, repr=False) -class FaultDelay(betterproto.Message): - """ - Delay specification is used to inject latency into the - HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections. - [#next-free-field: 6] - """ - - # Unused and deprecated. Will be removed in the next release. - type: "FaultDelayFaultDelayType" = betterproto.enum_field(1) - # Add a fixed delay before forwarding the operation upstream. See - # https://developers.google.com/protocol-buffers/docs/proto3#json for the - # JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified delay will - # be injected before a new request/operation. For TCP connections, the - # proxying of the connection upstream will be delayed for the specified - # period. This is required if type is FIXED. - fixed_delay: timedelta = betterproto.message_field(3, group="fault_delay_secifier") - # Fault delays are controlled via an HTTP header (if applicable). - header_delay: "FaultDelayHeaderDelay" = betterproto.message_field( - 5, group="fault_delay_secifier" - ) - # The percentage of operations/connections/requests on which the delay will - # be injected. - percentage: "____type__.FractionalPercent" = betterproto.message_field(4) - - def __post_init__(self) -> None: - super().__post_init__() - if self.type: - warnings.warn("FaultDelay.type is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class FaultDelayHeaderDelay(betterproto.Message): - """ - Fault delays are controlled via an HTTP header (if applicable). See the - :ref:`HTTP fault filter ` - documentation for more information. - """ - - pass - - -@dataclass(eq=False, repr=False) -class FaultRateLimit(betterproto.Message): - """Describes a rate limit to be applied.""" - - # A fixed rate limit. - fixed_limit: "FaultRateLimitFixedLimit" = betterproto.message_field( - 1, group="limit_type" - ) - # Rate limits are controlled via an HTTP header (if applicable). - header_limit: "FaultRateLimitHeaderLimit" = betterproto.message_field( - 3, group="limit_type" - ) - # The percentage of operations/connections/requests on which the rate limit - # will be injected. - percentage: "____type__.FractionalPercent" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class FaultRateLimitFixedLimit(betterproto.Message): - """Describes a fixed/constant rate limit.""" - - # The limit supplied in KiB/s. - limit_kbps: int = betterproto.uint64_field(1) - - -@dataclass(eq=False, repr=False) -class FaultRateLimitHeaderLimit(betterproto.Message): - """ - Rate limits are controlled via an HTTP header (if applicable). See the - :ref:`HTTP fault filter ` - documentation for more information. - """ - - pass - - -from ..... 
import type as ____type__ diff --git a/src/envoy_data_plane/envoy/config/filter/http/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/adaptive_concurrency/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/adaptive_concurrency/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/adaptive_concurrency/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/adaptive_concurrency/v2alpha/__init__.py deleted file mode 100644 index 0879b86..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/adaptive_concurrency/v2alpha/__init__.py +++ /dev/null @@ -1,85 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class GradientControllerConfig(betterproto.Message): - """Configuration parameters for the gradient controller.""" - - # The percentile to use when summarizing aggregated samples. Defaults to p50. - sample_aggregate_percentile: "_____type__.Percent" = betterproto.message_field(1) - concurrency_limit_params: "GradientControllerConfigConcurrencyLimitCalculationParams" = betterproto.message_field( - 2 - ) - min_rtt_calc_params: "GradientControllerConfigMinimumRttCalculationParams" = ( - betterproto.message_field(3) - ) - - -@dataclass(eq=False, repr=False) -class GradientControllerConfigConcurrencyLimitCalculationParams(betterproto.Message): - """ - Parameters controlling the periodic recalculation of the concurrency limit - from sampled request latencies. - """ - - # The allowed upper-bound on the calculated concurrency limit. Defaults to - # 1000. - max_concurrency_limit: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # The period of time samples are taken to recalculate the concurrency limit. - concurrency_update_interval: timedelta = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class GradientControllerConfigMinimumRttCalculationParams(betterproto.Message): - """ - Parameters controlling the periodic minRTT recalculation. [#next-free- - field: 6] - """ - - # The time interval between recalculating the minimum request round-trip - # time. - interval: timedelta = betterproto.message_field(1) - # The number of requests to aggregate/sample during the minRTT recalculation - # window before updating. Defaults to 50. - request_count: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # Randomized time delta that will be introduced to the start of the minRTT - # calculation window. This is represented as a percentage of the interval - # duration. Defaults to 15%. Example: If the interval is 10s and the jitter - # is 15%, the next window will begin somewhere in the range (10s - 11.5s). - jitter: "_____type__.Percent" = betterproto.message_field(3) - # The concurrency limit set while measuring the minRTT. Defaults to 3. - min_concurrency: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # Amount added to the measured minRTT to add stability to the concurrency - # limit during natural variability in latency. 
This is expressed as a - # percentage of the measured value and can be adjusted to allow more or less - # tolerance to the sampled latency values. Defaults to 25%. - buffer: "_____type__.Percent" = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class AdaptiveConcurrency(betterproto.Message): - # Gradient concurrency control will be used. - gradient_controller_config: "GradientControllerConfig" = betterproto.message_field( - 1, group="concurrency_controller_config" - ) - # If set to false, the adaptive concurrency filter will operate as a pass- - # through filter. If the message is unspecified, the filter will be enabled. - enabled: "_____api_v2_core__.RuntimeFeatureFlag" = betterproto.message_field(2) - - -from ...... import type as _____type__ -from ......api.v2 import core as _____api_v2_core__ diff --git a/src/envoy_data_plane/envoy/config/filter/http/aws_lambda/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/aws_lambda/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/aws_lambda/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/aws_lambda/v2alpha/__init__.py deleted file mode 100644 index bbce534..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/aws_lambda/v2alpha/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class ConfigInvocationMode(betterproto.Enum): - SYNCHRONOUS = 0 - ASYNCHRONOUS = 1 - - -@dataclass(eq=False, repr=False) -class Config(betterproto.Message): - """AWS Lambda filter config""" - - # The ARN of the AWS Lambda to invoke when the filter is engaged Must be in - # the following format: arn::lambda:::function: - arn: str = betterproto.string_field(1) - # Whether to transform the request (headers and body) to a JSON payload or - # pass it as is. - payload_passthrough: bool = betterproto.bool_field(2) - # Determines the way to invoke the Lambda function. - invocation_mode: "ConfigInvocationMode" = betterproto.enum_field(3) - - -@dataclass(eq=False, repr=False) -class PerRouteConfig(betterproto.Message): - """ - Per-route configuration for AWS Lambda. This can be useful when invoking a - different Lambda function or a different version of the same Lambda - depending on the route. - """ - - invoke_config: "Config" = betterproto.message_field(1) diff --git a/src/envoy_data_plane/envoy/config/filter/http/aws_request_signing/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/aws_request_signing/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/aws_request_signing/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/aws_request_signing/v2alpha/__init__.py deleted file mode 100644 index aa30ea1..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/aws_request_signing/v2alpha/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
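As a point of reference, the adaptive concurrency messages above can be instantiated directly as betterproto dataclasses. The sketch below assumes the pre-bump generated modules that this patch deletes are still importable under the paths shown in the diff; the local aliases ConcurrencyLimitParams and MinRttParams are shorthand introduced here, and all field names are taken from the deleted code above.

import datetime

from envoy_data_plane.envoy.config.filter.http.adaptive_concurrency.v2alpha import (
    AdaptiveConcurrency,
    GradientControllerConfig,
    GradientControllerConfigConcurrencyLimitCalculationParams as ConcurrencyLimitParams,
    GradientControllerConfigMinimumRttCalculationParams as MinRttParams,
)
from envoy_data_plane.envoy.type import Percent

config = AdaptiveConcurrency(
    gradient_controller_config=GradientControllerConfig(
        # Summarize samples at p90 instead of the default p50.
        sample_aggregate_percentile=Percent(value=90.0),
        concurrency_limit_params=ConcurrencyLimitParams(
            max_concurrency_limit=500,  # wrapped uint32 fields accept a plain int
            concurrency_update_interval=datetime.timedelta(milliseconds=100),
        ),
        min_rtt_calc_params=MinRttParams(
            interval=datetime.timedelta(seconds=30),
            request_count=50,
            jitter=Percent(value=15.0),
            buffer=Percent(value=25.0),
        ),
    )
)

data = bytes(config)                          # wire-format serialization
restored = AdaptiveConcurrency().parse(data)  # round-trip back into a dataclass
print(restored.to_dict())                     # JSON-style dict, handy for YAML snippets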
-# sources: envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class AwsRequestSigning(betterproto.Message): - """Top level configuration for the AWS request signing filter.""" - - # The `service namespace `_ of the HTTP - # endpoint. Example: s3 - service_name: str = betterproto.string_field(1) - # The `region `_ - # hosting the HTTP endpoint. Example: us-west-2 - region: str = betterproto.string_field(2) - # Indicates that before signing headers, the host header will be swapped with - # this value. If not set or empty, the original host header value will be - # used and no rewrite will happen. Note: this rewrite affects both signing - # and host header forwarding. However, this option shouldn't be used with - # :ref:`HCM host rewrite ` - # given that the value set here would be used for signing whereas the value - # set in the HCM would be used for host header forwarding which is not the - # desired outcome. - host_rewrite: str = betterproto.string_field(3) diff --git a/src/envoy_data_plane/envoy/config/filter/http/buffer/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/buffer/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/buffer/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/buffer/v2/__init__.py deleted file mode 100644 index 8c806e0..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/buffer/v2/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/buffer/v2/buffer.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Buffer(betterproto.Message): - # The maximum request size that the filter will buffer before the connection - # manager will stop buffering and return a 413 response. - max_request_bytes: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class BufferPerRoute(betterproto.Message): - # Disable the buffer filter for this particular vhost or route. - disabled: bool = betterproto.bool_field(1, group="override") - # Override the global configuration of the filter with this new config. - buffer: "Buffer" = betterproto.message_field(2, group="override") diff --git a/src/envoy_data_plane/envoy/config/filter/http/cache/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/cache/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/cache/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/cache/v2alpha/__init__.py deleted file mode 100644 index 4879fc4..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/cache/v2alpha/__init__.py +++ /dev/null @@ -1,67 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
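The Buffer filter messages above use a betterproto oneof (the "override" group) for the per-route override. A minimal sketch, again assuming the deleted modules are importable:

import betterproto

from envoy_data_plane.envoy.config.filter.http.buffer.v2 import Buffer, BufferPerRoute

# Global filter config: buffer up to 64 KiB before the connection manager returns a 413.
buffer_cfg = Buffer(max_request_bytes=64 * 1024)

# Per-route override: either disable the filter or substitute a different Buffer config.
per_route = BufferPerRoute(buffer=Buffer(max_request_bytes=1024))

# Both fields live in the "override" oneof group; which_one_of reports the member that is set.
field_name, value = betterproto.which_one_of(per_route, "override")
print(field_name)  # -> "buffer"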
-# sources: envoy/config/filter/http/cache/v2alpha/cache.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class CacheConfig(betterproto.Message): - # Config specific to the cache storage implementation. - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(1) - # List of matching rules that defines allowed *Vary* headers. The *vary* - # response header holds a list of header names that affect the contents of a - # response, as described by - # https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. During - # insertion, *allowed_vary_headers* acts as a allowlist: if a response's - # *vary* header mentions any header names that aren't matched by any rules in - # *allowed_vary_headers*, that response will not be cached. During lookup, - # *allowed_vary_headers* controls what request headers will be sent to the - # cache storage implementation. - allowed_vary_headers: List[ - "_____type_matcher__.StringMatcher" - ] = betterproto.message_field(2) - # [#not-implemented-hide:] - # Modifies cache key creation by restricting which parts of the URL are - # included. - key_creator_params: "CacheConfigKeyCreatorParams" = betterproto.message_field(3) - # [#not-implemented-hide:] Max body - # size the cache filter will insert into a cache. 0 means unlimited (though - # the cache storage implementation may have its own limit beyond which it - # will reject insertions). - max_body_bytes: int = betterproto.uint32_field(4) - - -@dataclass(eq=False, repr=False) -class CacheConfigKeyCreatorParams(betterproto.Message): - """ - [#not-implemented-hide:] Modifies cache key creation by restricting which - parts of the URL are included. - """ - - # If true, exclude the URL scheme from the cache key. Set to true if your - # origins always produce the same response for http and https requests. - exclude_scheme: bool = betterproto.bool_field(1) - # If true, exclude the host from the cache key. Set to true if your origins' - # responses don't ever depend on host. - exclude_host: bool = betterproto.bool_field(2) - # If *query_parameters_included* is nonempty, only query parameters matched - # by one or more of its matchers are included in the cache key. Any other - # query params will not affect cache lookup. - query_parameters_included: List[ - "_____api_v2_route__.QueryParameterMatcher" - ] = betterproto.message_field(3) - # If *query_parameters_excluded* is nonempty, query parameters matched by one - # or more of its matchers are excluded from the cache key (even if also - # matched by *query_parameters_included*), and will not affect cache lookup. 
- query_parameters_excluded: List[ - "_____api_v2_route__.QueryParameterMatcher" - ] = betterproto.message_field(4) - - -from ......api.v2 import route as _____api_v2_route__ -from ......type import matcher as _____type_matcher__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/filter/http/compressor/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/compressor/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/compressor/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/compressor/v2/__init__.py deleted file mode 100644 index 98e9020..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/compressor/v2/__init__.py +++ /dev/null @@ -1,45 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/compressor/v2/compressor.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Compressor(betterproto.Message): - """[#next-free-field: 6]""" - - # Minimum response length, in bytes, which will trigger compression. The - # default value is 30. - content_length: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # Set of strings that allows specifying which mime-types yield compression; - # e.g., application/json, text/html, etc. When this field is not defined, - # compression will be applied to the following mime-types: - # "application/javascript", "application/json", "application/xhtml+xml", - # "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml" and - # their synonyms. - content_type: List[str] = betterproto.string_field(2) - # If true, disables compression when the response contains an etag header. - # When it is false, the filter will preserve weak etags and remove the ones - # that require strong validation. - disable_on_etag_header: bool = betterproto.bool_field(3) - # If true, removes accept-encoding from the request headers before - # dispatching it to the upstream so that responses do not get compressed - # before reaching the filter. .. attention: To avoid interfering with - # other compression filters in the same chain use this option in the - # filter closest to the upstream. - remove_accept_encoding_header: bool = betterproto.bool_field(4) - # Runtime flag that controls whether the filter is enabled or not. If set to - # false, the filter will operate as a pass-through filter. If not specified, - # defaults to enabled. - runtime_enabled: "_____api_v2_core__.RuntimeFeatureFlag" = ( - betterproto.message_field(5) - ) - - -from ......api.v2 import core as _____api_v2_core__ diff --git a/src/envoy_data_plane/envoy/config/filter/http/cors/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/cors/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/cors/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/cors/v2/__init__.py deleted file mode 100644 index 899638e..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/cors/v2/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
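A short sketch of the CacheConfig message above, under the same import assumptions. The storage-selecting typed_config (a protobuf Any) is left unset to keep the example minimal; the Vary allowlist and key-creator parameters mirror the field comments above.

from envoy_data_plane.envoy.config.filter.http.cache.v2alpha import (
    CacheConfig,
    CacheConfigKeyCreatorParams,
)
from envoy_data_plane.envoy.type.matcher import StringMatcher

cache_cfg = CacheConfig(
    # Responses may only vary on accept-encoding and still be cached.
    allowed_vary_headers=[StringMatcher(exact="accept-encoding")],
    # Share cache entries between http:// and https:// requests.
    key_creator_params=CacheConfigKeyCreatorParams(exclude_scheme=True),
    # Refuse to insert bodies larger than 2 MiB.
    max_body_bytes=2 * 1024 * 1024,
)
print(cache_cfg.to_dict())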
-# sources: envoy/config/filter/http/cors/v2/cors.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Cors(betterproto.Message): - """Cors filter config.""" - - pass diff --git a/src/envoy_data_plane/envoy/config/filter/http/csrf/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/csrf/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/csrf/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/csrf/v2/__init__.py deleted file mode 100644 index 78de4f2..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/csrf/v2/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/csrf/v2/csrf.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class CsrfPolicy(betterproto.Message): - """CSRF filter config.""" - - # Specifies the % of requests for which the CSRF filter is enabled. If - # :ref:`runtime_key - # ` is specified, - # Envoy will lookup the runtime key to get the percentage of requests to - # filter. .. note:: This field defaults to 100/:ref:`HUNDRED - # `. - filter_enabled: "_____api_v2_core__.RuntimeFractionalPercent" = ( - betterproto.message_field(1) - ) - # Specifies that CSRF policies will be evaluated and tracked, but not - # enforced. This is intended to be used when ``filter_enabled`` is off and - # will be ignored otherwise. If :ref:`runtime_key - # ` is specified, - # Envoy will lookup the runtime key to get the percentage of requests for - # which it will evaluate and track the request's *Origin* and *Destination* - # to determine if it's valid, but will not enforce any policies. - shadow_enabled: "_____api_v2_core__.RuntimeFractionalPercent" = ( - betterproto.message_field(2) - ) - # Specifies additional source origins that will be allowed in addition to the - # destination origin. More information on how this can be configured via - # runtime can be found :ref:`here `. - additional_origins: List[ - "_____type_matcher__.StringMatcher" - ] = betterproto.message_field(3) - - -from ......api.v2 import core as _____api_v2_core__ -from ......type import matcher as _____type_matcher__ diff --git a/src/envoy_data_plane/envoy/config/filter/http/dynamic_forward_proxy/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/dynamic_forward_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/__init__.py deleted file mode 100644 index 464dca9..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FilterConfig(betterproto.Message): - """ - Configuration for the dynamic forward proxy HTTP filter. 
See the - :ref:`architecture overview ` for - more information. [#extension: envoy.filters.http.dynamic_forward_proxy] - """ - - # The DNS cache configuration that the filter will attach to. Note this - # configuration must match that of associated :ref:`dynamic forward proxy - # cluster configuration `. - dns_cache_config: "____common_dynamic_forward_proxy_v2_alpha__.DnsCacheConfig" = ( - betterproto.message_field(1) - ) - - -@dataclass(eq=False, repr=False) -class PerRouteConfig(betterproto.Message): - """Per route Configuration for the dynamic forward proxy HTTP filter.""" - - # Indicates that before DNS lookup, the host header will be swapped with this - # value. If not set or empty, the original host header value will be used and - # no rewrite will happen. Note: this rewrite affects both DNS lookup and host - # header forwarding. However, this option shouldn't be used with :ref:`HCM - # host rewrite ` given that - # the value set here would be used for DNS lookups whereas the value set in - # the HCM would be used for host header forwarding which is not the desired - # outcome. - host_rewrite: str = betterproto.string_field(1, group="host_rewrite_specifier") - # Indicates that before DNS lookup, the host header will be swapped with the - # value of this header. If not set or empty, the original host header value - # will be used and no rewrite will happen. Note: this rewrite affects both - # DNS lookup and host header forwarding. However, this option shouldn't be - # used with :ref:`HCM host rewrite header - # ` given that - # the value set here would be used for DNS lookups whereas the value set in - # the HCM would be used for host header forwarding which is not the desired - # outcome. .. note:: If the header appears multiple times only the first - # value is used. - auto_host_rewrite_header: str = betterproto.string_field( - 2, group="host_rewrite_specifier" - ) - - -from .....common.dynamic_forward_proxy import ( - v2alpha as ____common_dynamic_forward_proxy_v2_alpha__, -) diff --git a/src/envoy_data_plane/envoy/config/filter/http/dynamo/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/dynamo/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/dynamo/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/dynamo/v2/__init__.py deleted file mode 100644 index d581270..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/dynamo/v2/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/dynamo/v2/dynamo.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Dynamo(betterproto.Message): - """Dynamo filter config.""" - - pass diff --git a/src/envoy_data_plane/envoy/config/filter/http/ext_authz/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/ext_authz/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/ext_authz/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/ext_authz/v2/__init__.py deleted file mode 100644 index 7c91677..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/ext_authz/v2/__init__.py +++ /dev/null @@ -1,221 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
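The CsrfPolicy message shown above pairs a runtime-overridable fraction with a shadow mode. The sketch below is illustrative only: the RuntimeFractionalPercent field names (default_value, runtime_key) and the FractionalPercent numerator are assumed from the v2 core and type APIs rather than shown in this diff, and the runtime key and origin are invented values.

from envoy_data_plane.envoy.api.v2.core import RuntimeFractionalPercent
from envoy_data_plane.envoy.config.filter.http.csrf.v2 import CsrfPolicy
from envoy_data_plane.envoy.type import FractionalPercent
from envoy_data_plane.envoy.type.matcher import StringMatcher

csrf = CsrfPolicy(
    # Enforce CSRF checks for every request (100 over the default HUNDRED denominator).
    filter_enabled=RuntimeFractionalPercent(
        default_value=FractionalPercent(numerator=100),
        runtime_key="csrf.filter_enabled",  # hypothetical runtime key
    ),
    # Treat this extra origin as valid in addition to the destination origin.
    additional_origins=[StringMatcher(exact="trusted.example.com")],
)
print(csrf.to_dict())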
-# sources: envoy/config/filter/http/ext_authz/v2/ext_authz.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from typing import Dict, List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ExtAuthz(betterproto.Message): - """[#next-free-field: 12]""" - - # gRPC service configuration (default timeout: 200ms). - grpc_service: "_____api_v2_core__.GrpcService" = betterproto.message_field( - 1, group="services" - ) - # HTTP service configuration (default timeout: 200ms). - http_service: "HttpService" = betterproto.message_field(3, group="services") - # Changes filter's behaviour on errors: 1. When set to true, the filter will - # *accept* client request even if the communication with the authorization - # service has failed, or if the authorization service has returned a HTTP 5xx - # error. 2. When set to false, ext-authz will *reject* client requests and - # return a *Forbidden* response if the communication with the authorization - # service has failed, or if the authorization service has returned a HTTP - # 5xx error. Note that errors can be *always* tracked in the :ref:`stats - # `. - failure_mode_allow: bool = betterproto.bool_field(2) - # [#not-implemented-hide: Support for this field has been removed.] - use_alpha: bool = betterproto.bool_field(4) - # Enables filter to buffer the client request body and send it within the - # authorization request. A ``x-envoy-auth-partial-body: false|true`` metadata - # header will be added to the authorization request message indicating if the - # body data is partial. - with_request_body: "BufferSettings" = betterproto.message_field(5) - # Clears route cache in order to allow the external authorization service to - # correctly affect routing decisions. Filter clears all cached routes when: - # 1. The field is set to *true*. 2. The status returned from the - # authorization service is a HTTP 200 or gRPC 0. 3. At least one - # *authorization response header* is added to the client request, or is used - # for altering another client request header. - clear_route_cache: bool = betterproto.bool_field(6) - # Sets the HTTP status that is returned to the client when there is a network - # error between the filter and the authorization server. The default status - # is HTTP 403 Forbidden. - status_on_error: "_____type__.HttpStatus" = betterproto.message_field(7) - # Specifies a list of metadata namespaces whose values, if present, will be - # passed to the ext_authz service as an opaque *protobuf::Struct*. For - # example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata ` is set, then the following will pass the jwt payload to the - # authorization server. .. code-block:: yaml metadata_context_namespaces: - # - envoy.filters.http.jwt_authn - metadata_context_namespaces: List[str] = betterproto.string_field(8) - # Specifies if the filter is enabled. If :ref:`runtime_key - # ` is specified, - # Envoy will lookup the runtime key to get the percentage of requests to - # filter. If this field is not specified, the filter will be enabled for all - # requests. - filter_enabled: "_____api_v2_core__.RuntimeFractionalPercent" = ( - betterproto.message_field(9) - ) - # Specifies whether to deny the requests, when the filter is disabled. If - # :ref:`runtime_key ` is - # specified, Envoy will lookup the runtime key to determine whether to deny - # request for filter protected path at filter disabling. 
If filter is - # disabled in typed_per_filter_config for the path, requests will not be - # denied. If this field is not specified, all requests will be allowed when - # disabled. - deny_at_disable: "_____api_v2_core__.RuntimeFeatureFlag" = ( - betterproto.message_field(11) - ) - # Specifies if the peer certificate is sent to the external service. When - # this field is true, Envoy will include the peer X.509 certificate, if - # available, in the :ref:`certificate`. - include_peer_certificate: bool = betterproto.bool_field(10) - - def __post_init__(self) -> None: - super().__post_init__() - if self.use_alpha: - warnings.warn("ExtAuthz.use_alpha is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class BufferSettings(betterproto.Message): - """Configuration for buffering the request data.""" - - # Sets the maximum size of a message body that the filter will hold in - # memory. Envoy will return *HTTP 413* and will *not* initiate the - # authorization process when buffer reaches the number set in this field. - # Note that this setting will have precedence over :ref:`failure_mode_allow < - # envoy_api_field_config.filter.http.ext_authz.v2.ExtAuthz.failure_mode_allow - # >`. - max_request_bytes: int = betterproto.uint32_field(1) - # When this field is true, Envoy will buffer the message until - # *max_request_bytes* is reached. The authorization request will be - # dispatched and no 413 HTTP error will be returned by the filter. - allow_partial_message: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class HttpService(betterproto.Message): - """ - HttpService is used for raw HTTP communication between the filter and the - authorization service. When configured, the filter will parse the client - request and use these attributes to call the authorization server. - Depending on the response, the filter may reject or accept the client - request. Note that in any of these events, metadata can be added, removed - or overridden by the filter: *On authorization request*, a list of allowed - request headers may be supplied. See :ref:`allowed_headers ` for - details. Additional headers metadata may be added to the authorization - request. See :ref:`headers_to_add ` for details. On authorization - response status HTTP 200 OK, the filter will allow traffic to the upstream - and additional headers metadata may be added to the original client - request. See :ref:`allowed_upstream_headers ` for - details. On other authorization response statuses, the filter will not - allow traffic. Additional headers metadata as well as body may be added to - the client's response. See :ref:`allowed_client_headers - ` for details. [#next-free-field: 9] - """ - - # Sets the HTTP server URI which the authorization requests must be sent to. - server_uri: "_____api_v2_core__.HttpUri" = betterproto.message_field(1) - # Sets a prefix to the value of authorization request header *Path*. - path_prefix: str = betterproto.string_field(2) - # Settings used for controlling authorization request metadata. - authorization_request: "AuthorizationRequest" = betterproto.message_field(7) - # Settings used for controlling authorization response metadata. - authorization_response: "AuthorizationResponse" = betterproto.message_field(8) - - -@dataclass(eq=False, repr=False) -class AuthorizationRequest(betterproto.Message): - # Authorization request will include the client request headers that have a - # correspondent match in the :ref:`list - # `. 
Note that in addition to - # the user's supplied matchers: 1. *Host*, *Method*, *Path* and *Content- - # Length* are automatically included to the list. 2. *Content-Length* will be - # set to 0 and the request to the authorization service will not have a - # message body. However, the authorization request can include the buffered - # client request body (controlled by :ref:`with_request_body ` setting), - # consequently the value of *Content-Length* of the authorization request - # reflects the size of its payload size. - allowed_headers: "_____type_matcher__.ListStringMatcher" = ( - betterproto.message_field(1) - ) - # Sets a list of headers that will be included to the request to - # authorization service. Note that client request of the same key will be - # overridden. - headers_to_add: List["_____api_v2_core__.HeaderValue"] = betterproto.message_field( - 2 - ) - - -@dataclass(eq=False, repr=False) -class AuthorizationResponse(betterproto.Message): - # When this :ref:`list ` is - # set, authorization response headers that have a correspondent match will be - # added to the original client request. Note that coexistent headers will be - # overridden. - allowed_upstream_headers: "_____type_matcher__.ListStringMatcher" = ( - betterproto.message_field(1) - ) - # When this :ref:`list `. is - # set, authorization response headers that have a correspondent match will be - # added to the client's response. Note that when this list is *not* set, all - # the authorization response headers, except *Authority (Host)* will be in - # the response to the client. When a header is included in this list, *Path*, - # *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are - # automatically added. - allowed_client_headers: "_____type_matcher__.ListStringMatcher" = ( - betterproto.message_field(2) - ) - - -@dataclass(eq=False, repr=False) -class ExtAuthzPerRoute(betterproto.Message): - """Extra settings on a per virtualhost/route/weighted-cluster level.""" - - # Disable the ext auth filter for this particular vhost or route. If disabled - # is specified in multiple per-filter-configs, the most specific one will be - # used. - disabled: bool = betterproto.bool_field(1, group="override") - # Check request settings for this route. - check_settings: "CheckSettings" = betterproto.message_field(2, group="override") - - -@dataclass(eq=False, repr=False) -class CheckSettings(betterproto.Message): - """ - Extra settings for the check request. You can use this to provide extra - context for the external authorization server on specific virtual hosts \ - routes. For example, adding a context extension on the virtual host level - can give the ext-authz server information on what virtual host is used - without needing to parse the host header. If CheckSettings is specified in - multiple per-filter-configs, they will be merged in order, and the result - will be used. - """ - - # Context extensions to set on the CheckRequest's :ref:`AttributeContext.cont - # ext_extensions` Merge semantics for this field are such that keys from more - # specific configs override. .. note:: These settings are only applied to a - # filter configured with a :ref:`grpc_service`. - context_extensions: Dict[str, str] = betterproto.map_field( - 1, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - - -from ...... 
import type as _____type__ -from ......api.v2 import core as _____api_v2_core__ -from ......type import matcher as _____type_matcher__ diff --git a/src/envoy_data_plane/envoy/config/filter/http/fault/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/fault/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/fault/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/fault/v2/__init__.py deleted file mode 100644 index 657b481..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/fault/v2/__init__.py +++ /dev/null @@ -1,114 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/fault/v2/fault.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FaultAbort(betterproto.Message): - # HTTP status code to use to abort the HTTP request. - http_status: int = betterproto.uint32_field(2, group="error_type") - # Fault aborts are controlled via an HTTP header (if applicable). - header_abort: "FaultAbortHeaderAbort" = betterproto.message_field( - 4, group="error_type" - ) - # The percentage of requests/operations/connections that will be aborted with - # the error code provided. - percentage: "_____type__.FractionalPercent" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class FaultAbortHeaderAbort(betterproto.Message): - """ - Fault aborts are controlled via an HTTP header (if applicable). See the - :ref:`HTTP fault filter ` - documentation for more information. - """ - - pass - - -@dataclass(eq=False, repr=False) -class HttpFault(betterproto.Message): - """[#next-free-field: 14]""" - - # If specified, the filter will inject delays based on the values in the - # object. - delay: "___fault_v2__.FaultDelay" = betterproto.message_field(1) - # If specified, the filter will abort requests based on the values in the - # object. At least *abort* or *delay* must be specified. - abort: "FaultAbort" = betterproto.message_field(2) - # Specifies the name of the (destination) upstream cluster that the filter - # should match on. Fault injection will be restricted to requests bound to - # the specific upstream cluster. - upstream_cluster: str = betterproto.string_field(3) - # Specifies a set of headers that the filter should match on. The fault - # injection filter can be applied selectively to requests that match a set of - # headers specified in the fault filter config. The chances of actual fault - # injection further depend on the value of the :ref:`percentage - # ` field. - # The filter will check the request's headers against all the specified - # headers in the filter config. A match will happen if all the headers in the - # config are present in the request with the same values (or based on - # presence if the *value* field is not in the config). - headers: List["_____api_v2_route__.HeaderMatcher"] = betterproto.message_field(4) - # Faults are injected for the specified list of downstream hosts. If this - # setting is not set, faults are injected for all downstream nodes. - # Downstream node name is taken from :ref:`the HTTP x-envoy-downstream- - # service-node ` header - # and compared against downstream_nodes list. 
- downstream_nodes: List[str] = betterproto.string_field(5) - # The maximum number of faults that can be active at a single time via the - # configured fault filter. Note that because this setting can be overridden - # at the route level, it's possible for the number of active faults to be - # greater than this value (if injected via a different route). If not - # specified, defaults to unlimited. This setting can be overridden via - # `runtime ` and any faults that - # are not injected due to overflow will be indicated via the `faults_overflow - # ` stat. .. attention:: Like - # other :ref:`circuit breakers ` in Envoy, this - # is a fuzzy limit. It's possible for the number of active faults to rise - # slightly above the configured amount due to the implementation details. - max_active_faults: Optional[int] = betterproto.message_field( - 6, wraps=betterproto.TYPE_UINT32 - ) - # The response rate limit to be applied to the response body of the stream. - # When configured, the percentage can be overridden by the - # :ref:`fault.http.rate_limit.response_percent - # ` runtime key. .. attention:: - # This is a per-stream limit versus a connection level limit. This means that - # concurrent streams will each get an independent limit. - response_rate_limit: "___fault_v2__.FaultRateLimit" = betterproto.message_field(7) - # The runtime key to override the :ref:`default - # ` runtime. The default is: - # fault.http.delay.fixed_delay_percent - delay_percent_runtime: str = betterproto.string_field(8) - # The runtime key to override the :ref:`default - # ` runtime. The default is: - # fault.http.abort.abort_percent - abort_percent_runtime: str = betterproto.string_field(9) - # The runtime key to override the :ref:`default - # ` runtime. The default is: - # fault.http.delay.fixed_duration_ms - delay_duration_runtime: str = betterproto.string_field(10) - # The runtime key to override the :ref:`default - # ` runtime. The default is: - # fault.http.abort.http_status - abort_http_status_runtime: str = betterproto.string_field(11) - # The runtime key to override the :ref:`default - # ` runtime. The default is: - # fault.http.max_active_faults - max_active_faults_runtime: str = betterproto.string_field(12) - # The runtime key to override the :ref:`default - # ` runtime. The default is: - # fault.http.rate_limit.response_percent - response_rate_limit_percent_runtime: str = betterproto.string_field(13) - - -from ...... import type as _____type__ -from ......api.v2 import route as _____api_v2_route__ -from ....fault import v2 as ___fault_v2__ diff --git a/src/envoy_data_plane/envoy/config/filter/http/grpc_http1_bridge/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/grpc_http1_bridge/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/grpc_http1_bridge/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/grpc_http1_bridge/v2/__init__.py deleted file mode 100644 index b035fa4..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/grpc_http1_bridge/v2/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
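Tying the pieces together, the HttpFault message above composes the FaultDelay and FaultRateLimit messages from the common fault config earlier in this patch. A hedged sketch, with field names taken from the deleted code and the cluster name and percentages invented for illustration:

from datetime import timedelta

from envoy_data_plane.envoy.config.filter.fault.v2 import FaultDelay
from envoy_data_plane.envoy.config.filter.http.fault.v2 import FaultAbort, HttpFault
from envoy_data_plane.envoy.type import FractionalPercent

fault = HttpFault(
    # Delay 10% of matching requests by two seconds before forwarding upstream.
    delay=FaultDelay(
        fixed_delay=timedelta(seconds=2),
        percentage=FractionalPercent(numerator=10),
    ),
    # Abort 1% of matching requests with a 503.
    abort=FaultAbort(
        http_status=503,
        percentage=FractionalPercent(numerator=1),
    ),
    # Restrict injection to requests routed to this upstream cluster.
    upstream_cluster="backend",
    # Cap the number of simultaneously active faults (wrapped uint32).
    max_active_faults=100,
)
print(fault.to_dict())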
-# sources: envoy/config/filter/http/grpc_http1_bridge/v2/config.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Config(betterproto.Message): - """gRPC HTTP/1.1 Bridge filter config.""" - - pass diff --git a/src/envoy_data_plane/envoy/config/filter/http/grpc_http1_reverse_bridge/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/grpc_http1_reverse_bridge/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/__init__.py deleted file mode 100644 index 3bdc556..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FilterConfig(betterproto.Message): - """gRPC reverse bridge filter configuration""" - - # The content-type to pass to the upstream when the gRPC bridge filter is - # applied. The filter will also validate that the upstream responds with the - # same content type. - content_type: str = betterproto.string_field(1) - # If true, Envoy will assume that the upstream doesn't understand gRPC frames - # and strip the gRPC frame from the request, and add it back in to the - # response. This will hide the gRPC semantics from the upstream, allowing it - # to receive and respond with a simple binary encoded protobuf. - withhold_grpc_frames: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class FilterConfigPerRoute(betterproto.Message): - """ - gRPC reverse bridge filter configuration per virtualhost/route/weighted- - cluster level. - """ - - # If true, disables gRPC reverse bridge filter for this particular vhost or - # route. If disabled is specified in multiple per-filter-configs, the most - # specific one will be used. - disabled: bool = betterproto.bool_field(1) diff --git a/src/envoy_data_plane/envoy/config/filter/http/grpc_stats/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/grpc_stats/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/grpc_stats/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/grpc_stats/v2alpha/__init__.py deleted file mode 100644 index 9c42967..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/grpc_stats/v2alpha/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/grpc_stats/v2alpha/config.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FilterConfig(betterproto.Message): - """gRPC statistics filter configuration""" - - # If true, the filter maintains a filter state object with the request and - # response message counts. 
- emit_filter_state: bool = betterproto.bool_field(1) - # If set, specifies an allowlist of service/methods that will have individual - # stats emitted for them. Any call that does not match the allowlist will be - # counted in a stat with no method specifier: `cluster..grpc.*`. - individual_method_stats_allowlist: "_____api_v2_core__.GrpcMethodList" = ( - betterproto.message_field(2, group="per_method_stat_specifier") - ) - # If set to true, emit stats for all service/method names. If set to false, - # emit stats for all service/message types to the same stats without - # including the service/method in the name, with prefix - # `cluster..grpc`. This can be useful if service/method granularity is - # not needed, or if each cluster only receives a single method. .. - # attention:: This option is only safe if all clients are trusted. If this - # option is enabled with untrusted clients, the clients could cause - # unbounded growth in the number of stats in Envoy, using unbounded memory - # and potentially slowing down stats pipelines. .. attention:: If neither - # `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the - # behavior will default to `stats_for_all_methods=false`. This default value - # is changed due to the previous value being deprecated. This behavior can - # be changed with runtime override `envoy.deprecated_features.grpc_stats_fi - # lter_enable_stats_for_all_methods_by_default`. - stats_for_all_methods: Optional[bool] = betterproto.message_field( - 3, wraps=betterproto.TYPE_BOOL, group="per_method_stat_specifier" - ) - - -@dataclass(eq=False, repr=False) -class FilterObject(betterproto.Message): - """gRPC statistics filter state object in protobuf form.""" - - # Count of request messages in the request stream. - request_message_count: int = betterproto.uint64_field(1) - # Count of response messages in the response stream. - response_message_count: int = betterproto.uint64_field(2) - - -from ......api.v2 import core as _____api_v2_core__ diff --git a/src/envoy_data_plane/envoy/config/filter/http/grpc_web/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/grpc_web/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/grpc_web/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/grpc_web/v2/__init__.py deleted file mode 100644 index 87365e8..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/grpc_web/v2/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/grpc_web/v2/grpc_web.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class GrpcWeb(betterproto.Message): - """gRPC Web filter config.""" - - pass diff --git a/src/envoy_data_plane/envoy/config/filter/http/gzip/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/gzip/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/gzip/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/gzip/v2/__init__.py deleted file mode 100644 index 09d697e..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/gzip/v2/__init__.py +++ /dev/null @@ -1,108 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
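The grpc_stats FilterConfig above selects its per-method behaviour through the per_method_stat_specifier oneof. A small sketch under the same assumptions:

import betterproto

from envoy_data_plane.envoy.config.filter.http.grpc_stats.v2alpha import FilterConfig

stats_cfg = FilterConfig(
    # Keep request/response message counts in filter state.
    emit_filter_state=True,
    # Wrapped bool inside the per_method_stat_specifier oneof; only safe with trusted clients.
    stats_for_all_methods=True,
)

field_name, value = betterproto.which_one_of(stats_cfg, "per_method_stat_specifier")
print(field_name, value)  # -> "stats_for_all_methods" True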
-# sources: envoy/config/filter/http/gzip/v2/gzip.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class GzipCompressionStrategy(betterproto.Enum): - DEFAULT = 0 - FILTERED = 1 - HUFFMAN = 2 - RLE = 3 - - -class GzipCompressionLevelEnum(betterproto.Enum): - DEFAULT = 0 - BEST = 1 - SPEED = 2 - - -@dataclass(eq=False, repr=False) -class Gzip(betterproto.Message): - """[#next-free-field: 11]""" - - # Value from 1 to 9 that controls the amount of internal memory used by zlib. - # Higher values use more memory, but are faster and produce better - # compression results. The default value is 5. - memory_level: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # Minimum response length, in bytes, which will trigger compression. The - # default value is 30. .. attention: **This field is deprecated**. Set the - # `compressor` field instead. - content_length: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # A value used for selecting the zlib compression level. This setting will - # affect speed and amount of compression applied to the content. "BEST" - # provides higher compression at the cost of higher latency, "SPEED" provides - # lower compression with minimum impact on response time. "DEFAULT" provides - # an optimal result between speed and compression. This field will be set to - # "DEFAULT" if not specified. - compression_level: "GzipCompressionLevelEnum" = betterproto.enum_field(3) - # A value used for selecting the zlib compression strategy which is directly - # related to the characteristics of the content. Most of the time "DEFAULT" - # will be the best choice, though there are situations which changing this - # parameter might produce better results. For example, run-length encoding - # (RLE) is typically used when the content is known for having sequences - # which same data occurs many consecutive times. For more information about - # each strategy, please refer to zlib manual. - compression_strategy: "GzipCompressionStrategy" = betterproto.enum_field(4) - # Set of strings that allows specifying which mime-types yield compression; - # e.g., application/json, text/html, etc. When this field is not defined, - # compression will be applied to the following mime-types: - # "application/javascript", "application/json", "application/xhtml+xml", - # "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml". .. - # attention: **This field is deprecated**. Set the `compressor` field - # instead. - content_type: List[str] = betterproto.string_field(6) - # If true, disables compression when the response contains an etag header. - # When it is false, the filter will preserve weak etags and remove the ones - # that require strong validation. .. attention: **This field is - # deprecated**. Set the `compressor` field instead. - disable_on_etag_header: bool = betterproto.bool_field(7) - # If true, removes accept-encoding from the request headers before - # dispatching it to the upstream so that responses do not get compressed - # before reaching the filter. .. attention: **This field is deprecated**. - # Set the `compressor` field instead. - remove_accept_encoding_header: bool = betterproto.bool_field(8) - # Value from 9 to 15 that represents the base two logarithmic of the - # compressor's window size. 
Larger window results in better compression at - # the expense of memory usage. The default is 12 which will produce a 4096 - # bytes window. For more details about this parameter, please refer to zlib - # manual > deflateInit2. - window_bits: Optional[int] = betterproto.message_field( - 9, wraps=betterproto.TYPE_UINT32 - ) - # Set of configuration parameters common for all compression filters. If this - # field is set then the fields `content_length`, `content_type`, - # `disable_on_etag_header` and `remove_accept_encoding_header` are ignored. - compressor: "__compressor_v2__.Compressor" = betterproto.message_field(10) - - def __post_init__(self) -> None: - super().__post_init__() - if self.content_length: - warnings.warn("Gzip.content_length is deprecated", DeprecationWarning) - if self.content_type: - warnings.warn("Gzip.content_type is deprecated", DeprecationWarning) - if self.disable_on_etag_header: - warnings.warn( - "Gzip.disable_on_etag_header is deprecated", DeprecationWarning - ) - if self.remove_accept_encoding_header: - warnings.warn( - "Gzip.remove_accept_encoding_header is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class GzipCompressionLevel(betterproto.Message): - pass - - -from ...compressor import v2 as __compressor_v2__ diff --git a/src/envoy_data_plane/envoy/config/filter/http/header_to_metadata/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/header_to_metadata/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/header_to_metadata/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/header_to_metadata/v2/__init__.py deleted file mode 100644 index a1ca9b0..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/header_to_metadata/v2/__init__.py +++ /dev/null @@ -1,67 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class ConfigValueType(betterproto.Enum): - STRING = 0 - NUMBER = 1 - PROTOBUF_VALUE = 2 - - -class ConfigValueEncode(betterproto.Enum): - NONE = 0 - BASE64 = 1 - - -@dataclass(eq=False, repr=False) -class Config(betterproto.Message): - # The list of rules to apply to requests. - request_rules: List["ConfigRule"] = betterproto.message_field(1) - # The list of rules to apply to responses. - response_rules: List["ConfigRule"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ConfigKeyValuePair(betterproto.Message): - """[#next-free-field: 6]""" - - # The namespace — if this is empty, the filter's namespace will be used. - metadata_namespace: str = betterproto.string_field(1) - # The key to use within the namespace. - key: str = betterproto.string_field(2) - # The value to pair with the given key. When used for a `on_header_present` - # case, if value is non-empty it'll be used instead of the header value. If - # both are empty, no metadata is added. When used for a `on_header_missing` - # case, a non-empty value must be provided otherwise no metadata is added. - value: str = betterproto.string_field(3) - # The value's type — defaults to string. - type: "ConfigValueType" = betterproto.enum_field(4) - # How is the value encoded, default is NONE (not encoded). The value will be - # decoded accordingly before storing to metadata. 
- encode: "ConfigValueEncode" = betterproto.enum_field(5) - - -@dataclass(eq=False, repr=False) -class ConfigRule(betterproto.Message): - """ - A Rule defines what metadata to apply when a header is present or missing. - """ - - # The header that triggers this rule — required. - header: str = betterproto.string_field(1) - # If the header is present, apply this metadata KeyValuePair. If the value in - # the KeyValuePair is non-empty, it'll be used instead of the header value. - on_header_present: "ConfigKeyValuePair" = betterproto.message_field(2) - # If the header is not present, apply this metadata KeyValuePair. The value - # in the KeyValuePair must be set, since it'll be used in lieu of the missing - # header value. - on_header_missing: "ConfigKeyValuePair" = betterproto.message_field(3) - # Whether or not to remove the header after a rule is applied. This prevents - # headers from leaking. - remove: bool = betterproto.bool_field(4) diff --git a/src/envoy_data_plane/envoy/config/filter/http/health_check/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/health_check/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/health_check/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/health_check/v2/__init__.py deleted file mode 100644 index ff12573..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/health_check/v2/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/health_check/v2/health_check.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import Dict, List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class HealthCheck(betterproto.Message): - """[#next-free-field: 6]""" - - # Specifies whether the filter operates in pass through mode or not. - pass_through_mode: Optional[bool] = betterproto.message_field( - 1, wraps=betterproto.TYPE_BOOL - ) - # If operating in pass through mode, the amount of time in milliseconds that - # the filter should cache the upstream response. - cache_time: timedelta = betterproto.message_field(3) - # If operating in non-pass-through mode, specifies a set of upstream cluster - # names and the minimum percentage of servers in each of those clusters that - # must be healthy or degraded in order for the filter to return a 200. .. - # note:: This value is interpreted as an integer by truncating, so 12.50% - # will be calculated as if it were 12%. - cluster_min_healthy_percentages: Dict[ - str, "_____type__.Percent" - ] = betterproto.map_field(4, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # Specifies a set of health check request headers to match on. The health - # check filter will check a request’s headers against all the specified - # headers. To specify the health check endpoint, set the ``:path`` header to - # match on. - headers: List["_____api_v2_route__.HeaderMatcher"] = betterproto.message_field(5) - - -from ...... 
import type as _____type__ -from ......api.v2 import route as _____api_v2_route__ diff --git a/src/envoy_data_plane/envoy/config/filter/http/ip_tagging/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/ip_tagging/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/ip_tagging/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/ip_tagging/v2/__init__.py deleted file mode 100644 index 937a16b..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/ip_tagging/v2/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class IpTaggingRequestType(betterproto.Enum): - BOTH = 0 - INTERNAL = 1 - EXTERNAL = 2 - - -@dataclass(eq=False, repr=False) -class IpTagging(betterproto.Message): - # The type of request the filter should apply to. - request_type: "IpTaggingRequestType" = betterproto.enum_field(1) - # [#comment:TODO(ccaraman): Extend functionality to load IP tags from file - # system. Tracked by issue https://github.com/envoyproxy/envoy/issues/2695] - # The set of IP tags for the filter. - ip_tags: List["IpTaggingIpTag"] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class IpTaggingIpTag(betterproto.Message): - """Supplies the IP tag name and the IP address subnets.""" - - # Specifies the IP tag name to apply. - ip_tag_name: str = betterproto.string_field(1) - # A list of IP address subnets that will be tagged with ip_tag_name. Both - # IPv4 and IPv6 are supported. - ip_list: List["_____api_v2_core__.CidrRange"] = betterproto.message_field(2) - - -from ......api.v2 import core as _____api_v2_core__ diff --git a/src/envoy_data_plane/envoy/config/filter/http/jwt_authn/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/jwt_authn/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/jwt_authn/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/jwt_authn/v2alpha/__init__.py deleted file mode 100644 index a9aa665..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/jwt_authn/v2alpha/__init__.py +++ /dev/null @@ -1,306 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/jwt_authn/v2alpha/config.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import Dict, List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class JwtProvider(betterproto.Message): - """ - Please see following for JWT authentication flow: * `JSON Web Token (JWT) - `_ * `The OAuth 2.0 Authorization - Framework `_ * `OpenID Connect - `_ A JwtProvider message specifies how a JSON - Web Token (JWT) can be verified. It specifies: * issuer: the principal that - issues the JWT. It has to match the one from the token. * allowed - audiences: the ones in the token have to be listed here. * how to fetch - public key JWKS to verify the token signature. * how to extract JWT token - in the request. * how to pass successfully verified token payload. Example: - .. 
code-block:: yaml issuer: https://example.com audiences: - - bookstore_android.apps.googleusercontent.com - - bookstore_web.apps.googleusercontent.com remote_jwks: http_uri: - uri: https://example.com/.well-known/jwks.json cluster: - example_jwks_cluster cache_duration: seconds: 300 [#next- - free-field: 10] - """ - - # Specify the `principal - # `_ that issued the JWT, - # usually a URL or an email address. Example: https://securetoken.google.com - # Example: 1234567-compute@developer.gserviceaccount.com - issuer: str = betterproto.string_field(1) - # The list of JWT `audiences - # `_ are allowed to - # access. A JWT containing any of these audiences will be accepted. If not - # specified, will not check audiences in the token. Example: .. code-block:: - # yaml audiences: - bookstore_android.apps.googleusercontent.com - # - bookstore_web.apps.googleusercontent.com - audiences: List[str] = betterproto.string_field(2) - # JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies - # the remote HTTP URI and how the fetched JWKS should be cached. Example: .. - # code-block:: yaml remote_jwks: http_uri: uri: - # https://www.googleapis.com/oauth2/v1/certs cluster: - # jwt.www.googleapis.com|443 cache_duration: seconds: 300 - remote_jwks: "RemoteJwks" = betterproto.message_field( - 3, group="jwks_source_specifier" - ) - # JWKS is in local data source. It could be either in a local file or - # embedded in the inline_string. Example: local file .. code-block:: yaml - # local_jwks: filename: /etc/envoy/jwks/jwks1.txt Example: inline_string - # .. code-block:: yaml local_jwks: inline_string: ACADADADADA - local_jwks: "_____api_v2_core__.DataSource" = betterproto.message_field( - 4, group="jwks_source_specifier" - ) - # If false, the JWT is removed in the request after a success verification. - # If true, the JWT is not removed in the request. Default value is false. - forward: bool = betterproto.bool_field(5) - # Two fields below define where to extract the JWT from an HTTP request. If - # no explicit location is specified, the following default locations are - # tried in order: 1. The Authorization header using the `Bearer schema - # `_. Example:: - # Authorization: Bearer . 2. `access_token - # `_ query parameter. - # Multiple JWTs can be verified for a request. Each JWT has to be extracted - # from the locations its provider specified or from the default locations. - # Specify the HTTP headers to extract JWT token. For examples, following - # config: .. code-block:: yaml from_headers: - name: x-goog-iap-jwt- - # assertion can be used to extract token from header:: ``x-goog-iap-jwt- - # assertion: ``. - from_headers: List["JwtHeader"] = betterproto.message_field(6) - # JWT is sent in a query parameter. `jwt_params` represents the query - # parameter names. For example, if config is: .. code-block:: yaml - # from_params: - jwt_token The JWT format in query parameter is:: - # /path?jwt_token= - from_params: List[str] = betterproto.string_field(7) - # This field specifies the header name to forward a successfully verified JWT - # payload to the backend. The forwarded data is:: - # base64url_encoded(jwt_payload_in_JSON) If it is not specified, the payload - # will not be forwarded. - forward_payload_header: str = betterproto.string_field(8) - # If non empty, successfully verified JWT payloads will be written to - # StreamInfo DynamicMetadata in the format as: *namespace* is the jwt_authn - # filter name as **envoy.filters.http.jwt_authn** The value is the - # *protobuf::Struct*. 
The value of this field will be the key for its - # *fields* and the value is the *protobuf::Struct* converted from JWT JSON - # payload. For example, if payload_in_metadata is *my_payload*: .. code- - # block:: yaml envoy.filters.http.jwt_authn: my_payload: iss: - # https://example.com sub: test@example.com aud: - # https://example.com exp: 1501281058 - payload_in_metadata: str = betterproto.string_field(9) - - -@dataclass(eq=False, repr=False) -class RemoteJwks(betterproto.Message): - """ - This message specifies how to fetch JWKS from remote and how to cache it. - """ - - # The HTTP URI to fetch the JWKS. For example: .. code-block:: yaml - # http_uri: uri: https://www.googleapis.com/oauth2/v1/certs - # cluster: jwt.www.googleapis.com|443 - http_uri: "_____api_v2_core__.HttpUri" = betterproto.message_field(1) - # Duration after which the cached JWKS should be expired. If not specified, - # default cache duration is 5 minutes. - cache_duration: timedelta = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class JwtHeader(betterproto.Message): - """This message specifies a header location to extract JWT token.""" - - # The HTTP header name. - name: str = betterproto.string_field(1) - # The value prefix. The value format is "value_prefix" For example, - # for "Authorization: Bearer ", value_prefix="Bearer " with a space at - # the end. - value_prefix: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class ProviderWithAudiences(betterproto.Message): - """Specify a required provider with audiences.""" - - # Specify a required provider name. - provider_name: str = betterproto.string_field(1) - # This field overrides the one specified in the JwtProvider. - audiences: List[str] = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class JwtRequirement(betterproto.Message): - """ - This message specifies a Jwt requirement. An empty message means JWT - verification is not required. Here are some config examples: .. code- - block:: yaml # Example 1: not required with an empty message # Example 2: - require A provider_name: provider-A # Example 3: require A or B - requires_any: requirements: - provider_name: provider-A - - provider_name: provider-B # Example 4: require A and B requires_all: - requirements: - provider_name: provider-A - provider_name: - provider-B # Example 5: require A and (B or C) requires_all: - requirements: - provider_name: provider-A - requires_any: - requirements: - provider_name: provider-B - - provider_name: provider-C # Example 6: require A or (B and C) - requires_any: requirements: - provider_name: provider-A - - requires_all: requirements: - provider_name: provider-B - - provider_name: provider-C # Example 7: A is optional (if token from A is - provided, it must be valid, but also allows missing token.) requires_any: - requirements: - provider_name: provider-A - allow_missing: {} # - Example 8: A is optional and B is required. requires_all: requirements: - - requires_any: requirements: - provider_name: provider-A - - allow_missing: {} - provider_name: provider-B [#next-free-field: 7] - """ - - # Specify a required provider name. - provider_name: str = betterproto.string_field(1, group="requires_type") - # Specify a required provider with audiences. - provider_and_audiences: "ProviderWithAudiences" = betterproto.message_field( - 2, group="requires_type" - ) - # Specify list of JwtRequirement. Their results are OR-ed. If any one of them - # passes, the result is passed. 
- requires_any: "JwtRequirementOrList" = betterproto.message_field( - 3, group="requires_type" - ) - # Specify list of JwtRequirement. Their results are AND-ed. All of them must - # pass, if one of them fails or missing, it fails. - requires_all: "JwtRequirementAndList" = betterproto.message_field( - 4, group="requires_type" - ) - # The requirement is always satisfied even if JWT is missing or the JWT - # verification fails. A typical usage is: this filter is used to only verify - # JWTs and pass the verified JWT payloads to another filter, the other filter - # will make decision. In this mode, all JWT tokens will be verified. - allow_missing_or_failed: "betterproto_lib_google_protobuf.Empty" = ( - betterproto.message_field(5, group="requires_type") - ) - # The requirement is satisfied if JWT is missing, but failed if JWT is - # presented but invalid. Similar to allow_missing_or_failed, this is used to - # only verify JWTs and pass the verified payload to another filter. The - # different is this mode will reject requests with invalid tokens. - allow_missing: "betterproto_lib_google_protobuf.Empty" = betterproto.message_field( - 6, group="requires_type" - ) - - -@dataclass(eq=False, repr=False) -class JwtRequirementOrList(betterproto.Message): - """ - This message specifies a list of RequiredProvider. Their results are OR-ed; - if any one of them passes, the result is passed - """ - - # Specify a list of JwtRequirement. - requirements: List["JwtRequirement"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class JwtRequirementAndList(betterproto.Message): - """ - This message specifies a list of RequiredProvider. Their results are AND- - ed; all of them must pass, if one of them fails or missing, it fails. - """ - - # Specify a list of JwtRequirement. - requirements: List["JwtRequirement"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class RequirementRule(betterproto.Message): - """ - This message specifies a Jwt requirement for a specific Route condition. - Example 1: .. code-block:: yaml - match: prefix: /healthz In - above example, "requires" field is empty for /healthz prefix match, it - means that requests matching the path prefix don't require JWT - authentication. Example 2: .. code-block:: yaml - match: prefix: - / requires: { provider_name: provider-A } In above example, all - requests matched the path prefix require jwt authentication from - "provider-A". - """ - - # The route matching parameter. Only when the match is satisfied, the - # "requires" field will apply. For example: following match will match all - # requests. .. code-block:: yaml match: prefix: / - match: "_____api_v2_route__.RouteMatch" = betterproto.message_field(1) - # Specify a Jwt Requirement. Please detail comment in message JwtRequirement. - requires: "JwtRequirement" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class FilterStateRule(betterproto.Message): - """ - This message specifies Jwt requirements based on stream_info.filterState. - This FilterState should use `Router::StringAccessor` object to set a string - value. Other HTTP filters can use it to specify Jwt requirements - dynamically. Example: .. code-block:: yaml name: jwt_selector - requires: issuer_1: provider_name: issuer1 issuer_2: - provider_name: issuer2 If a filter set "jwt_selector" with "issuer_1" to - FilterState for a request, jwt_authn filter will use - JwtRequirement{"provider_name": "issuer1"} to verify. 
- """ - - # The filter state name to retrieve the `Router::StringAccessor` object. - name: str = betterproto.string_field(1) - # A map of string keys to requirements. The string key is the string value in - # the FilterState with the name specified in the *name* field above. - requires: Dict[str, "JwtRequirement"] = betterproto.map_field( - 3, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - - -@dataclass(eq=False, repr=False) -class JwtAuthentication(betterproto.Message): - """ - This is the Envoy HTTP filter config for JWT authentication. For example: - .. code-block:: yaml providers: provider1: issuer: issuer1 - audiences: - audience1 - audience2 remote_jwks: - http_uri: uri: https://example.com/.well-known/jwks.json - cluster: example_jwks_cluster provider2: issuer: issuer2 - local_jwks: inline_string: jwks_string rules: # Not jwt - verification is required for /health path - match: prefix: - /health # Jwt verification for provider1 is required for path prefixed - with "prefix" - match: prefix: /prefix requires: - provider_name: provider1 # Jwt verification for either provider1 or - provider2 is required for all other requests. - match: - prefix: / requires: requires_any: requirements: - - provider_name: provider1 - provider_name: provider2 - """ - - # Map of provider names to JwtProviders. .. code-block:: yaml providers: - # provider1: issuer: issuer1 audiences: - audience1 - # - audience2 remote_jwks: http_uri: uri: - # https://example.com/.well-known/jwks.json cluster: - # example_jwks_cluster provider2: issuer: provider2 - # local_jwks: inline_string: jwks_string - providers: Dict[str, "JwtProvider"] = betterproto.map_field( - 1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - # Specifies requirements based on the route matches. The first matched - # requirement will be applied. If there are overlapped match conditions, - # please put the most specific match first. Examples .. code-block:: yaml - # rules: - match: prefix: /healthz - match: prefix: - # /baz requires: provider_name: provider1 - match: - # prefix: /foo requires: requires_any: requirements: - # - provider_name: provider1 - provider_name: provider2 - - # match: prefix: /bar requires: requires_all: - # requirements: - provider_name: provider1 - - # provider_name: provider2 - rules: List["RequirementRule"] = betterproto.message_field(2) - # This message specifies Jwt requirements based on stream_info.filterState. - # Other HTTP filters can use it to specify Jwt requirements dynamically. The - # *rules* field above is checked first, if it could not find any matches, - # check this one. - filter_state_rules: "FilterStateRule" = betterproto.message_field(3) - # When set to true, bypass the `CORS preflight request - # `_ - # regardless of JWT requirements specified in the rules. 
- bypass_cors_preflight: bool = betterproto.bool_field(4) - - -from ......api.v2 import core as _____api_v2_core__ -from ......api.v2 import route as _____api_v2_route__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/filter/http/lua/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/lua/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/lua/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/lua/v2/__init__.py deleted file mode 100644 index 108e673..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/lua/v2/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/lua/v2/lua.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Lua(betterproto.Message): - # The Lua code that Envoy will execute. This can be a very small script that - # further loads code from disk if desired. Note that if JSON configuration is - # used, the code must be properly escaped. YAML configuration may be easier - # to read since YAML supports multi-line strings so complex scripts can be - # easily expressed inline in the configuration. - inline_code: str = betterproto.string_field(1) diff --git a/src/envoy_data_plane/envoy/config/filter/http/on_demand/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/on_demand/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/on_demand/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/on_demand/v2/__init__.py deleted file mode 100644 index aa02c06..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/on_demand/v2/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/on_demand/v2/on_demand.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class OnDemand(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/config/filter/http/original_src/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/original_src/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/original_src/v2alpha1/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/original_src/v2alpha1/__init__.py deleted file mode 100644 index 1a61bad..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/original_src/v2alpha1/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/original_src/v2alpha1/original_src.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class OriginalSrc(betterproto.Message): - """ - The Original Src filter binds upstream connections to the original source - address determined for the request. This address could come from something - like the Proxy Protocol filter, or it could come from trusted http headers. 
- [#extension: envoy.filters.http.original_src] - """ - - # Sets the SO_MARK option on the upstream connection's socket to the provided - # value. Used to ensure that non-local addresses may be routed back through - # envoy when binding to the original source address. The option will not be - # applied if the mark is 0. - mark: int = betterproto.uint32_field(1) diff --git a/src/envoy_data_plane/envoy/config/filter/http/rate_limit/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/rate_limit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/rate_limit/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/rate_limit/v2/__init__.py deleted file mode 100644 index b0060ea..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/rate_limit/v2/__init__.py +++ /dev/null @@ -1,48 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/rate_limit/v2/rate_limit.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RateLimit(betterproto.Message): - """[#next-free-field: 8]""" - - # The rate limit domain to use when calling the rate limit service. - domain: str = betterproto.string_field(1) - # Specifies the rate limit configurations to be applied with the same stage - # number. If not set, the default stage number is 0. .. note:: The filter - # supports a range of 0 - 10 inclusively for stage numbers. - stage: int = betterproto.uint32_field(2) - # The type of requests the filter should apply to. The supported types are - # *internal*, *external* or *both*. A request is considered internal if - # :ref:`x-envoy-internal` is - # set to true. If :ref:`x-envoy-internal` is not set or false, a request is considered external. The - # filter defaults to *both*, and it will apply to all request types. - request_type: str = betterproto.string_field(3) - # The timeout in milliseconds for the rate limit service RPC. If not set, - # this defaults to 20ms. - timeout: timedelta = betterproto.message_field(4) - # The filter's behaviour in case the rate limiting service does not respond - # back. When it is set to true, Envoy will not allow traffic in case of - # communication failure between rate limiting service and the proxy. Defaults - # to false. - failure_mode_deny: bool = betterproto.bool_field(5) - # Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead - # of the default `UNAVAILABLE` gRPC code for a rate limited gRPC call. The - # HTTP code will be 200 for a gRPC response. - rate_limited_as_resource_exhausted: bool = betterproto.bool_field(6) - # Configuration for an external rate limit service provider. If not - # specified, any calls to the rate limit service will immediately return - # success. 
- rate_limit_service: "____ratelimit_v2__.RateLimitServiceConfig" = ( - betterproto.message_field(7) - ) - - -from .....ratelimit import v2 as ____ratelimit_v2__ diff --git a/src/envoy_data_plane/envoy/config/filter/http/rbac/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/rbac/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/rbac/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/rbac/v2/__init__.py deleted file mode 100644 index 9acd2d6..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/rbac/v2/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/rbac/v2/rbac.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Rbac(betterproto.Message): - """RBAC filter config.""" - - # Specify the RBAC rules to be applied globally. If absent, no enforcing RBAC - # policy will be applied. - rules: "____rbac_v2__.Rbac" = betterproto.message_field(1) - # Shadow rules are not enforced by the filter (i.e., returning a 403) but - # will emit stats and logs and can be used for rule testing. If absent, no - # shadow RBAC policy will be applied. - shadow_rules: "____rbac_v2__.Rbac" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class RbacPerRoute(betterproto.Message): - # Override the global configuration of the filter with this new config. If - # absent, the global RBAC policy will be disabled for this route. - rbac: "Rbac" = betterproto.message_field(2) - - -from .....rbac import v2 as ____rbac_v2__ diff --git a/src/envoy_data_plane/envoy/config/filter/http/router/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/router/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/router/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/router/v2/__init__.py deleted file mode 100644 index b0daf9b..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/router/v2/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/router/v2/router.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Router(betterproto.Message): - """[#next-free-field: 7]""" - - # Whether the router generates dynamic cluster statistics. Defaults to true. - # Can be disabled in high performance scenarios. - dynamic_stats: Optional[bool] = betterproto.message_field( - 1, wraps=betterproto.TYPE_BOOL - ) - # Whether to start a child span for egress routed calls. This can be useful - # in scenarios where other filters (auth, ratelimit, etc.) make outbound - # calls and have child spans rooted at the same ingress parent. Defaults to - # false. - start_child_span: bool = betterproto.bool_field(2) - # Configuration for HTTP upstream logs emitted by the router. Upstream logs - # are configured in the same way as access logs, but each log entry - # represents an upstream request. Presuming retries are configured, multiple - # upstream requests may be made for each downstream (inbound) request. 
- upstream_log: List["___accesslog_v2__.AccessLog"] = betterproto.message_field(3) - # Do not add any additional *x-envoy-* headers to requests or responses. This - # only affects the :ref:`router filter generated *x-envoy-* headers - # `, other Envoy filters and the HTTP - # connection manager may continue to set *x-envoy-* headers. - suppress_envoy_headers: bool = betterproto.bool_field(4) - # Specifies a list of HTTP headers to strictly validate. Envoy will reject a - # request and respond with HTTP status 400 if the request contains an invalid - # value for any of the headers listed in this field. Strict header checking - # is only supported for the following headers: Value must be a ','-delimited - # list (i.e. no spaces) of supported retry policy values: * - # :ref:`config_http_filters_router_x-envoy-retry-grpc-on` * - # :ref:`config_http_filters_router_x-envoy-retry-on` Value must be an - # integer: * :ref:`config_http_filters_router_x-envoy-max-retries` * - # :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` * - # :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` - strict_check_headers: List[str] = betterproto.string_field(5) - # If not set, ingress Envoy will ignore :ref:`config_http_filters_router_x- - # envoy-expected-rq-timeout-ms` header, populated by egress Envoy, when - # deriving timeout for upstream cluster. - respect_expected_rq_timeout: bool = betterproto.bool_field(6) - - -from ....accesslog import v2 as ___accesslog_v2__ diff --git a/src/envoy_data_plane/envoy/config/filter/http/squash/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/squash/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/squash/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/squash/v2/__init__.py deleted file mode 100644 index 62191e2..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/squash/v2/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/squash/v2/squash.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Squash(betterproto.Message): - """[#next-free-field: 6]""" - - # The name of the cluster that hosts the Squash server. - cluster: str = betterproto.string_field(1) - # When the filter requests the Squash server to create a DebugAttachment, it - # will use this structure as template for the body of the request. It can - # contain reference to environment variables in the form of '{{ ENV_VAR_NAME - # }}'. These can be used to provide the Squash server with more information - # to find the process to attach the debugger to. For example, in a Istio/k8s - # environment, this will contain information on the pod: .. code-block:: json - # { "spec": { "attachment": { "pod": "{{ POD_NAME }}", - # "namespace": "{{ POD_NAMESPACE }}" }, "match_request": true } - # } (where POD_NAME, POD_NAMESPACE are configured in the pod via the Downward - # API) - attachment_template: "betterproto_lib_google_protobuf.Struct" = ( - betterproto.message_field(2) - ) - # The timeout for individual requests sent to the Squash cluster. Defaults to - # 1 second. - request_timeout: timedelta = betterproto.message_field(3) - # The total timeout Squash will delay a request and wait for it to be - # attached. Defaults to 60 seconds. 
- attachment_timeout: timedelta = betterproto.message_field(4) - # Amount of time to poll for the status of the attachment object in the - # Squash server (to check if has been attached). Defaults to 1 second. - attachment_poll_period: timedelta = betterproto.message_field(5) - - -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/filter/http/tap/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/tap/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/tap/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/tap/v2alpha/__init__.py deleted file mode 100644 index effd23c..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/tap/v2alpha/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/tap/v2alpha/tap.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Tap(betterproto.Message): - """Top level configuration for the tap filter.""" - - # Common configuration for the HTTP tap filter. - common_config: "____common_tap_v2_alpha__.CommonExtensionConfig" = ( - betterproto.message_field(1) - ) - - -from .....common.tap import v2alpha as ____common_tap_v2_alpha__ diff --git a/src/envoy_data_plane/envoy/config/filter/http/transcoder/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/transcoder/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/http/transcoder/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/http/transcoder/v2/__init__.py deleted file mode 100644 index a472296..0000000 --- a/src/envoy_data_plane/envoy/config/filter/http/transcoder/v2/__init__.py +++ /dev/null @@ -1,98 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/http/transcoder/v2/transcoder.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class GrpcJsonTranscoder(betterproto.Message): - """[#next-free-field: 10]""" - - # Supplies the filename of :ref:`the proto descriptor set - # ` for the gRPC services. - proto_descriptor: str = betterproto.string_field(1, group="descriptor_set") - # Supplies the binary content of :ref:`the proto descriptor set - # ` for the gRPC services. - proto_descriptor_bin: bytes = betterproto.bytes_field(4, group="descriptor_set") - # A list of strings that supplies the fully qualified service names (i.e. - # "package_name.service_name") that the transcoder will translate. If the - # service name doesn't exist in ``proto_descriptor``, Envoy will fail at - # startup. The ``proto_descriptor`` may contain more services than the - # service names specified here, but they won't be translated. - services: List[str] = betterproto.string_field(2) - # Control options for response JSON. These options are passed directly to - # `JsonPrintOptions `_. - print_options: "GrpcJsonTranscoderPrintOptions" = betterproto.message_field(3) - # Whether to keep the incoming request route after the outgoing headers have - # been transformed to the match the upstream gRPC service. 
Note: This means - # that routes for gRPC services that are not transcoded cannot be used in - # combination with *match_incoming_request_route*. - match_incoming_request_route: bool = betterproto.bool_field(5) - # A list of query parameters to be ignored for transcoding method mapping. By - # default, the transcoder filter will not transcode a request if there are - # any unknown/invalid query parameters. Example : .. code-block:: proto - # service Bookstore { rpc GetShelf(GetShelfRequest) returns (Shelf) { - # option (google.api.http) = { get: "/shelves/{shelf}" }; - # } } message GetShelfRequest { int64 shelf = 1; } - # message Shelf {} The request ``/shelves/100?foo=bar`` will not be mapped to - # ``GetShelf``` because variable binding for ``foo`` is not defined. Adding - # ``foo`` to ``ignored_query_parameters`` will allow the same request to be - # mapped to ``GetShelf``. - ignored_query_parameters: List[str] = betterproto.string_field(6) - # Whether to route methods without the ``google.api.http`` option. Example : - # .. code-block:: proto package bookstore; service Bookstore { - # rpc GetShelf(GetShelfRequest) returns (Shelf) {} } message - # GetShelfRequest { int64 shelf = 1; } message Shelf {} The - # client could ``post`` a json body ``{"shelf": 1234}`` with the path of - # ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``. - auto_mapping: bool = betterproto.bool_field(7) - # Whether to ignore query parameters that cannot be mapped to a corresponding - # protobuf field. Use this if you cannot control the query parameters and do - # not know them beforehand. Otherwise use ``ignored_query_parameters``. - # Defaults to false. - ignore_unknown_query_parameters: bool = betterproto.bool_field(8) - # Whether to convert gRPC status headers to JSON. When trailer indicates a - # gRPC error and there was no HTTP body, take ``google.rpc.Status`` from the - # ``grpc-status-details-bin`` header and use it as JSON body. If there was no - # such header, make ``google.rpc.Status`` out of the ``grpc-status`` and - # ``grpc-message`` headers. The error details types must be present in the - # ``proto_descriptor``. For example, if an upstream server replies with - # headers: .. code-block:: none grpc-status: 5 grpc-status-details- - # bin: - # CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ - # The ``grpc-status-details-bin`` header contains a base64-encoded protobuf - # message ``google.rpc.Status``. It will be transcoded into: .. code-block:: - # none HTTP/1.1 404 Not Found content-type: application/json {"co - # de":5,"details":[{"@type":"type.googleapis.com/google.rpc.RequestInfo","req - # uestId":"r-1"}]} In order to transcode the message, the - # ``google.rpc.RequestInfo`` type from the ``google/rpc/error_details.proto`` - # should be included in the configured :ref:`proto descriptor set - # `. - convert_grpc_status: bool = betterproto.bool_field(9) - - -@dataclass(eq=False, repr=False) -class GrpcJsonTranscoderPrintOptions(betterproto.Message): - # Whether to add spaces, line breaks and indentation to make the JSON output - # easy to read. Defaults to false. - add_whitespace: bool = betterproto.bool_field(1) - # Whether to always print primitive fields. By default primitive fields with - # default values will be omitted in JSON output. For example, an int32 field - # set to 0 will be omitted. Setting this flag to true will override the - # default behavior and print primitive fields regardless of their values. - # Defaults to false. 
- always_print_primitive_fields: bool = betterproto.bool_field(2) - # Whether to always print enums as ints. By default they are rendered as - # strings. Defaults to false. - always_print_enums_as_ints: bool = betterproto.bool_field(3) - # Whether to preserve proto field names. By default protobuf will generate - # JSON field names using the ``json_name`` option, or lower camel case, in - # that order. Setting this flag will preserve the original field names. - # Defaults to false. - preserve_proto_field_names: bool = betterproto.bool_field(4) diff --git a/src/envoy_data_plane/envoy/config/filter/listener/__init__.py b/src/envoy_data_plane/envoy/config/filter/listener/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/listener/http_inspector/__init__.py b/src/envoy_data_plane/envoy/config/filter/listener/http_inspector/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/listener/http_inspector/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/listener/http_inspector/v2/__init__.py deleted file mode 100644 index bda456f..0000000 --- a/src/envoy_data_plane/envoy/config/filter/listener/http_inspector/v2/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/listener/http_inspector/v2/http_inspector.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class HttpInspector(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/config/filter/listener/original_dst/__init__.py b/src/envoy_data_plane/envoy/config/filter/listener/original_dst/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/listener/original_dst/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/listener/original_dst/v2/__init__.py deleted file mode 100644 index 9cbf152..0000000 --- a/src/envoy_data_plane/envoy/config/filter/listener/original_dst/v2/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/listener/original_dst/v2/original_dst.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class OriginalDst(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/config/filter/listener/original_src/__init__.py b/src/envoy_data_plane/envoy/config/filter/listener/original_src/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/listener/original_src/v2alpha1/__init__.py b/src/envoy_data_plane/envoy/config/filter/listener/original_src/v2alpha1/__init__.py deleted file mode 100644 index 4cc0cce..0000000 --- a/src/envoy_data_plane/envoy/config/filter/listener/original_src/v2alpha1/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/config/filter/listener/original_src/v2alpha1/original_src.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class OriginalSrc(betterproto.Message): - """ - The Original Src filter binds upstream connections to the original source - address determined for the connection. This address could come from - something like the Proxy Protocol filter, or it could come from trusted - http headers. - """ - - # Whether to bind the port to the one used in the original downstream - # connection. [#not-implemented-hide:] - bind_port: bool = betterproto.bool_field(1) - # Sets the SO_MARK option on the upstream connection's socket to the provided - # value. Used to ensure that non-local addresses may be routed back through - # envoy when binding to the original source address. The option will not be - # applied if the mark is 0. - mark: int = betterproto.uint32_field(2) diff --git a/src/envoy_data_plane/envoy/config/filter/listener/proxy_protocol/__init__.py b/src/envoy_data_plane/envoy/config/filter/listener/proxy_protocol/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/listener/proxy_protocol/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/listener/proxy_protocol/v2/__init__.py deleted file mode 100644 index e626d15..0000000 --- a/src/envoy_data_plane/envoy/config/filter/listener/proxy_protocol/v2/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ProxyProtocol(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/config/filter/listener/tls_inspector/__init__.py b/src/envoy_data_plane/envoy/config/filter/listener/tls_inspector/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/listener/tls_inspector/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/listener/tls_inspector/v2/__init__.py deleted file mode 100644 index 412b12b..0000000 --- a/src/envoy_data_plane/envoy/config/filter/listener/tls_inspector/v2/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class TlsInspector(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/config/filter/network/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/client_ssl_auth/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/client_ssl_auth/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/client_ssl_auth/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/client_ssl_auth/v2/__init__.py deleted file mode 100644 index 2698dd4..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/client_ssl_auth/v2/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ClientSslAuth(betterproto.Message): - # The :ref:`cluster manager ` cluster that - # runs the authentication service. The filter will connect to the service - # every 60s to fetch the list of principals. The service must support the - # expected :ref:`REST API `. - auth_api_cluster: str = betterproto.string_field(1) - # The prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(2) - # Time in milliseconds between principal refreshes from the authentication - # service. Default is 60000 (60s). The actual fetch time will be this value - # plus a random jittered value between 0-refresh_delay_ms milliseconds. - refresh_delay: timedelta = betterproto.message_field(3) - # An optional list of IP address and subnet masks that should be white listed - # for access by the filter. If no list is provided, there is no IP allowlist. - ip_white_list: List["_____api_v2_core__.CidrRange"] = betterproto.message_field(4) - - -from ......api.v2 import core as _____api_v2_core__ diff --git a/src/envoy_data_plane/envoy/config/filter/network/direct_response/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/direct_response/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/direct_response/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/direct_response/v2/__init__.py deleted file mode 100644 index 14ea866..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/direct_response/v2/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/direct_response/v2/config.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Config(betterproto.Message): - # Response data as a data source. 
- response: "_____api_v2_core__.DataSource" = betterproto.message_field(1) - - -from ......api.v2 import core as _____api_v2_core__ diff --git a/src/envoy_data_plane/envoy/config/filter/network/dubbo_proxy/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/dubbo_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/dubbo_proxy/v2alpha1/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/dubbo_proxy/v2alpha1/__init__.py deleted file mode 100644 index 5f6cde9..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/dubbo_proxy/v2alpha1/__init__.py +++ /dev/null @@ -1,143 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto, envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Dict, List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class ProtocolType(betterproto.Enum): - """Dubbo Protocol types supported by Envoy.""" - - # the default protocol. - Dubbo = 0 - - -class SerializationType(betterproto.Enum): - """Dubbo Serialization types supported by Envoy.""" - - # the default serialization protocol. - Hessian2 = 0 - - -@dataclass(eq=False, repr=False) -class RouteConfiguration(betterproto.Message): - """[#next-free-field: 6]""" - - # The name of the route configuration. Reserved for future use in - # asynchronous route discovery. - name: str = betterproto.string_field(1) - # The interface name of the service. - interface: str = betterproto.string_field(2) - # Which group does the interface belong to. - group: str = betterproto.string_field(3) - # The version number of the interface. - version: str = betterproto.string_field(4) - # The list of routes that will be matched, in order, against incoming - # requests. The first route that matches will be used. - routes: List["Route"] = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class Route(betterproto.Message): - # Route matching parameters. - match: "RouteMatch" = betterproto.message_field(1) - # Route request to some upstream cluster. - route: "RouteAction" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class RouteMatch(betterproto.Message): - # Method level routing matching. - method: "MethodMatch" = betterproto.message_field(1) - # Specifies a set of headers that the route should match on. The router will - # check the request’s headers against all the specified headers in the route - # config. A match will happen if all the headers in the route are present in - # the request with the same values (or based on presence if the value field - # is not in the config). - headers: List["_____api_v2_route__.HeaderMatcher"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class RouteAction(betterproto.Message): - # Indicates the upstream cluster to which the request should be routed. - cluster: str = betterproto.string_field(1, group="cluster_specifier") - # Multiple upstream clusters can be specified for a given route. The request - # is routed to one of the upstream clusters based on weights assigned to each - # cluster. Currently ClusterWeight only supports the name and weight fields. 
- weighted_clusters: "_____api_v2_route__.WeightedCluster" = ( - betterproto.message_field(2, group="cluster_specifier") - ) - - -@dataclass(eq=False, repr=False) -class MethodMatch(betterproto.Message): - # The name of the method. - name: "_____type_matcher__.StringMatcher" = betterproto.message_field(1) - # Method parameter definition. The key is the parameter index, starting from - # 0. The value is the parameter matching type. - params_match: Dict[ - int, "MethodMatchParameterMatchSpecifier" - ] = betterproto.map_field(2, betterproto.TYPE_UINT32, betterproto.TYPE_MESSAGE) - - -@dataclass(eq=False, repr=False) -class MethodMatchParameterMatchSpecifier(betterproto.Message): - """The parameter matching type.""" - - # If specified, header match will be performed based on the value of the - # header. - exact_match: str = betterproto.string_field(3, group="parameter_match_specifier") - # If specified, header match will be performed based on range. The rule will - # match if the request header value is within this range. The entire request - # header value must represent an integer in base 10 notation: consisting of - # an optional plus or minus sign followed by a sequence of digits. The rule - # will not match if the header value does not represent an integer. Match - # will fail for empty values, floating point numbers or if only a subsequence - # of the header value is an integer. Examples: * For range [-10,0), route - # will match for header value -1, but not for 0, "somestring", 10.9, - # "-1somestring" - range_match: "_____type__.Int64Range" = betterproto.message_field( - 4, group="parameter_match_specifier" - ) - - -@dataclass(eq=False, repr=False) -class DubboProxy(betterproto.Message): - """[#next-free-field: 6]""" - - # The human readable prefix to use when emitting statistics. - stat_prefix: str = betterproto.string_field(1) - # Configure the protocol used. - protocol_type: "ProtocolType" = betterproto.enum_field(2) - # Configure the serialization protocol used. - serialization_type: "SerializationType" = betterproto.enum_field(3) - # The route table for the connection manager is static and is specified in - # this property. - route_config: List["RouteConfiguration"] = betterproto.message_field(4) - # A list of individual Dubbo filters that make up the filter chain for - # requests made to the Dubbo proxy. Order matters as the filters are - # processed sequentially. For backwards compatibility, if no dubbo_filters - # are specified, a default Dubbo router filter (`envoy.filters.dubbo.router`) - # is used. - dubbo_filters: List["DubboFilter"] = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class DubboFilter(betterproto.Message): - """DubboFilter configures a Dubbo filter.""" - - # The name of the filter to instantiate. The name must match a supported - # filter. - name: str = betterproto.string_field(1) - # Filter specific configuration which depends on the filter being - # instantiated. See the supported filters for further documentation. - config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - - -from ...... 
import type as _____type__ -from ......api.v2 import route as _____api_v2_route__ -from ......type import matcher as _____type_matcher__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/filter/network/echo/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/echo/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/echo/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/echo/v2/__init__.py deleted file mode 100644 index f68ba27..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/echo/v2/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/echo/v2/echo.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Echo(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/config/filter/network/ext_authz/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/ext_authz/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/ext_authz/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/ext_authz/v2/__init__.py deleted file mode 100644 index 5738a1b..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/ext_authz/v2/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/ext_authz/v2/ext_authz.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ExtAuthz(betterproto.Message): - """ - External Authorization filter calls out to an external service over the - gRPC Authorization API defined by :ref:`CheckRequest - `. A failed check will cause - this filter to close the TCP connection. - """ - - # The prefix to use when emitting statistics. - stat_prefix: str = betterproto.string_field(1) - # The external authorization gRPC service configuration. The default timeout - # is set to 200ms by this filter. - grpc_service: "_____api_v2_core__.GrpcService" = betterproto.message_field(2) - # The filter's behaviour in case the external authorization service does not - # respond back. When it is set to true, Envoy will also allow traffic in case - # of communication failure between authorization service and the proxy. - # Defaults to false. - failure_mode_allow: bool = betterproto.bool_field(3) - # Specifies if the peer certificate is sent to the external service. When - # this field is true, Envoy will include the peer X.509 certificate, if - # available, in the :ref:`certificate`. 
- include_peer_certificate: bool = betterproto.bool_field(4) - - -from ......api.v2 import core as _____api_v2_core__ diff --git a/src/envoy_data_plane/envoy/config/filter/network/http_connection_manager/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/http_connection_manager/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/http_connection_manager/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/http_connection_manager/v2/__init__.py deleted file mode 100644 index 675a27e..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/http_connection_manager/v2/__init__.py +++ /dev/null @@ -1,633 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class HttpConnectionManagerCodecType(betterproto.Enum): - AUTO = 0 - HTTP1 = 1 - HTTP2 = 2 - HTTP3 = 3 - - -class HttpConnectionManagerServerHeaderTransformation(betterproto.Enum): - OVERWRITE = 0 - APPEND_IF_ABSENT = 1 - PASS_THROUGH = 2 - - -class HttpConnectionManagerForwardClientCertDetails(betterproto.Enum): - SANITIZE = 0 - FORWARD_ONLY = 1 - APPEND_FORWARD = 2 - SANITIZE_SET = 3 - ALWAYS_FORWARD_ONLY = 4 - - -class HttpConnectionManagerTracingOperationName(betterproto.Enum): - INGRESS = 0 - EGRESS = 1 - - -@dataclass(eq=False, repr=False) -class HttpConnectionManager(betterproto.Message): - """[#next-free-field: 37]""" - - # Supplies the type of codec that the connection manager should use. - codec_type: "HttpConnectionManagerCodecType" = betterproto.enum_field(1) - # The human readable prefix to use when emitting statistics for the - # connection manager. See the :ref:`statistics documentation - # ` for more information. - stat_prefix: str = betterproto.string_field(2) - # The connection manager’s route table will be dynamically loaded via the RDS - # API. - rds: "Rds" = betterproto.message_field(3, group="route_specifier") - # The route table for the connection manager is static and is specified in - # this property. - route_config: "_____api_v2__.RouteConfiguration" = betterproto.message_field( - 4, group="route_specifier" - ) - # A route table will be dynamically assigned to each request based on request - # attributes (e.g., the value of a header). The "routing scopes" (i.e., route - # tables) and "scope keys" are specified in this message. - scoped_routes: "ScopedRoutes" = betterproto.message_field( - 31, group="route_specifier" - ) - # A list of individual HTTP filters that make up the filter chain for - # requests made to the connection manager. :ref:`Order matters - # ` as the filters are processed - # sequentially as request events happen. - http_filters: List["HttpFilter"] = betterproto.message_field(5) - # Whether the connection manager manipulates the - # :ref:`config_http_conn_man_headers_user-agent` and - # :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See - # the linked documentation for more information. Defaults to false. 
- add_user_agent: Optional[bool] = betterproto.message_field( - 6, wraps=betterproto.TYPE_BOOL - ) - # Presence of the object defines whether the connection manager emits - # :ref:`tracing ` data to the :ref:`configured tracing - # provider `. - tracing: "HttpConnectionManagerTracing" = betterproto.message_field(7) - # Additional settings for HTTP requests handled by the connection manager. - # These will be applicable to both HTTP1 and HTTP2 requests. - common_http_protocol_options: "_____api_v2_core__.HttpProtocolOptions" = ( - betterproto.message_field(35) - ) - # Additional HTTP/1 settings that are passed to the HTTP/1 codec. - http_protocol_options: "_____api_v2_core__.Http1ProtocolOptions" = ( - betterproto.message_field(8) - ) - # Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - http2_protocol_options: "_____api_v2_core__.Http2ProtocolOptions" = ( - betterproto.message_field(9) - ) - # An optional override that the connection manager will write to the server - # header in responses. If not set, the default is *envoy*. - server_name: str = betterproto.string_field(10) - # Defines the action to be applied to the Server header on the response path. - # By default, Envoy will overwrite the header with the value specified in - # server_name. - server_header_transformation: "HttpConnectionManagerServerHeaderTransformation" = ( - betterproto.enum_field(34) - ) - # The maximum request headers size for incoming connections. If unconfigured, - # the default max request headers allowed is 60 KiB. Requests that exceed - # this limit will receive a 431 response. - max_request_headers_kb: Optional[int] = betterproto.message_field( - 29, wraps=betterproto.TYPE_UINT32 - ) - # The idle timeout for connections managed by the connection manager. The - # idle timeout is defined as the period in which there are no active - # requests. If not set, there is no idle timeout. When the idle timeout is - # reached the connection will be closed. If the connection is an HTTP/2 - # connection a drain sequence will occur prior to closing the connection. - # This field is deprecated. Use :ref:`idle_timeout ` instead. - idle_timeout: timedelta = betterproto.message_field(11) - # The stream idle timeout for connections managed by the connection manager. - # If not specified, this defaults to 5 minutes. The default value was - # selected so as not to interfere with any smaller configured timeouts that - # may have existed in configurations prior to the introduction of this - # feature, while introducing robustness to TCP connections that terminate - # without a FIN. This idle timeout applies to new streams and is overridable - # by the :ref:`route-level idle_timeout - # `. Even on a stream in - # which the override applies, prior to receipt of the initial request - # headers, the :ref:`stream_idle_timeout ` - # applies. Each time an encode/decode event for headers or data is processed - # for the stream, the timer will be reset. If the timeout fires, the stream - # is terminated with a 408 Request Timeout error code if no upstream response - # header has been received, otherwise a stream reset occurs. This timeout - # also specifies the amount of time that Envoy will wait for the peer to open - # enough window to write any remaining stream data once the entirety of - # stream data (local end stream is true) has been buffered pending available - # window. 
In other words, this timeout defends against a peer that does not - # release enough window to completely write the stream, even though all data - # has been proxied within available flow control windows. If the timeout is - # hit in this case, the :ref:`tx_flush_timeout - # ` counter will be incremented. Note - # that :ref:`max_stream_duration - # ` does not - # apply to this corner case. Note that it is possible to idle timeout even if - # the wire traffic for a stream is non-idle, due to the granularity of events - # presented to the connection manager. For example, while receiving very - # large request headers, it may be the case that there is traffic regularly - # arriving on the wire while the connection manage is only able to observe - # the end-of-headers event, hence the stream may still idle timeout. A value - # of 0 will completely disable the connection manager stream idle timeout, - # although per-route idle timeout overrides will continue to apply. - stream_idle_timeout: timedelta = betterproto.message_field(24) - # The amount of time that Envoy will wait for the entire request to be - # received. The timer is activated when the request is initiated, and is - # disarmed when the last byte of the request is sent upstream (i.e. all - # decoding filters have processed the request), OR when the response is - # initiated. If not specified or set to 0, this timeout is disabled. - request_timeout: timedelta = betterproto.message_field(28) - # The time that Envoy will wait between sending an HTTP/2 “shutdown - # notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. - # This is used so that Envoy provides a grace period for new streams that - # race with the final GOAWAY frame. During this grace period, Envoy will - # continue to accept new streams. After the grace period, a final GOAWAY - # frame is sent and Envoy will start refusing new streams. Draining occurs - # both when a connection hits the idle timeout or during general server - # draining. The default grace period is 5000 milliseconds (5 seconds) if this - # option is not specified. - drain_timeout: timedelta = betterproto.message_field(12) - # The delayed close timeout is for downstream connections managed by the HTTP - # connection manager. It is defined as a grace period after connection close - # processing has been locally initiated during which Envoy will wait for the - # peer to close (i.e., a TCP FIN/RST is received by Envoy from the downstream - # connection) prior to Envoy closing the socket associated with that - # connection. NOTE: This timeout is enforced even when the socket associated - # with the downstream connection is pending a flush of the write buffer. - # However, any progress made writing data to the socket will restart the - # timer associated with this timeout. This means that the total grace period - # for a socket in this state will be - # +. - # Delaying Envoy's connection close and giving the peer the opportunity to - # initiate the close sequence mitigates a race condition that exists when - # downstream clients do not drain/process data in a connection's receive - # buffer after a remote close has been detected via a socket write(). This - # race leads to such clients failing to process the response code sent by - # Envoy, which could result in erroneous downstream processing. If the - # timeout triggers, Envoy will close the connection's socket. The default - # timeout is 1000 ms if this option is not specified. .. 
NOTE:: To be - # useful in avoiding the race condition described above, this timeout must be - # set to *at least* +<100ms to account for a reasonable "worst" case processing time - # for a full iteration of Envoy's event loop>. .. WARNING:: A value of 0 - # will completely disable delayed close processing. When disabled, the - # downstream connection's socket will be closed immediately after the - # write flush is completed or will never close if the write flush does not - # complete. - delayed_close_timeout: timedelta = betterproto.message_field(26) - # Configuration for :ref:`HTTP access logs ` - # emitted by the connection manager. - access_log: List["___accesslog_v2__.AccessLog"] = betterproto.message_field(13) - # If set to true, the connection manager will use the real remote address of - # the client connection when determining internal versus external origin and - # manipulating various headers. If set to false or absent, the connection - # manager will use the :ref:`config_http_conn_man_headers_x-forwarded-for` - # HTTP header. See the documentation for - # :ref:`config_http_conn_man_headers_x-forwarded-for`, - # :ref:`config_http_conn_man_headers_x-envoy-internal`, and - # :ref:`config_http_conn_man_headers_x-envoy-external-address` for more - # information. - use_remote_address: Optional[bool] = betterproto.message_field( - 14, wraps=betterproto.TYPE_BOOL - ) - # The number of additional ingress proxy hops from the right side of the - # :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust - # when determining the origin client's IP address. The default is zero if - # this option is not specified. See the documentation for - # :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - xff_num_trusted_hops: int = betterproto.uint32_field(19) - # Configures what network addresses are considered internal for stats and - # header sanitation purposes. If unspecified, only RFC1918 IP addresses will - # be considered internal. See the documentation for - # :ref:`config_http_conn_man_headers_x-envoy-internal` for more information - # about internal/external addresses. - internal_address_config: "HttpConnectionManagerInternalAddressConfig" = ( - betterproto.message_field(25) - ) - # If set, Envoy will not append the remote address to the - # :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may - # be used in conjunction with HTTP filters that explicitly manipulate XFF - # after the HTTP connection manager has mutated the request headers. While - # :ref:`use_remote_address ` will also - # suppress XFF addition, it has consequences for logging and other Envoy uses - # of the remote address, so *skip_xff_append* should be used when only an - # elision of XFF addition is intended. - skip_xff_append: bool = betterproto.bool_field(21) - # Via header value to append to request and response headers. If this is - # empty, no via header will be appended. - via: str = betterproto.string_field(22) - # Whether the connection manager will generate the :ref:`x-request-id - # ` header if it does not exist. - # This defaults to true. Generating a random UUID4 is expensive so in high - # throughput scenarios where this feature is not desired it can be disabled. 
- generate_request_id: Optional[bool] = betterproto.message_field( - 15, wraps=betterproto.TYPE_BOOL - ) - # Whether the connection manager will keep the :ref:`x-request-id - # ` header if passed for a request - # that is edge (Edge request is the request from external clients to front - # Envoy) and not reset it, which is the current Envoy behaviour. This - # defaults to false. - preserve_external_request_id: bool = betterproto.bool_field(32) - # How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client- - # cert` (XFCC) HTTP header. - forward_client_cert_details: "HttpConnectionManagerForwardClientCertDetails" = ( - betterproto.enum_field(16) - ) - # This field is valid only when :ref:`forward_client_cert_details ` is APPEND_FORWARD or SANITIZE_SET and the - # client connection is mTLS. It specifies the fields in the client - # certificate to be forwarded. Note that in the - # :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* - # is always set, and *By* is always set when the client certificate presents - # the URI type Subject Alternative Name value. - set_current_client_cert_details: "HttpConnectionManagerSetCurrentClientCertDetails" = betterproto.message_field( - 17 - ) - # If proxy_100_continue is true, Envoy will proxy incoming "Expect: - # 100-continue" headers upstream, and forward "100 Continue" responses - # downstream. If this is false or not set, Envoy will instead strip the - # "Expect: 100-continue" header, and send a "100 Continue" response itself. - proxy_100_continue: bool = betterproto.bool_field(18) - # If :ref:`use_remote_address ` is true and - # represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote - # address is an IPv4 address, the address will be mapped to IPv6 before it is - # appended to *x-forwarded-for*. This is useful for testing compatibility of - # upstream services that parse the header value. For example, 50.0.0.1 is - # represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses - # `_ for details. This - # will also affect the :ref:`config_http_conn_man_headers_x-envoy-external- - # address` header. See :ref:`http_connection_manager.represent_ipv4_remote_ad - # dress_as_ipv4_mapped_ipv6 ` for runtime control. [#not-implemented- - # hide:] - represent_ipv4_remote_address_as_ipv4_mapped_ipv6: bool = betterproto.bool_field(20) - upgrade_configs: List[ - "HttpConnectionManagerUpgradeConfig" - ] = betterproto.message_field(23) - # Should paths be normalized according to RFC 3986 before any processing of - # requests by HTTP filters or routing? This affects the upstream *:path* - # header as well. For paths that fail this check, Envoy will respond with 400 - # to paths that are malformed. This defaults to false currently but will - # default true in the future. When not specified, this value may be - # overridden by the runtime variable :ref:`http_connection_manager.normalize_ - # path`. See `Normalization and - # Comparison `_ for details of - # normalization. Note that Envoy does not perform `case normalization - # `_ - normalize_path: Optional[bool] = betterproto.message_field( - 30, wraps=betterproto.TYPE_BOOL - ) - # Determines if adjacent slashes in the path are merged into one before any - # processing of requests by HTTP filters or routing. This affects the - # upstream *:path* header as well. Without setting this option, incoming - # requests with path `//dir///file` will not match against route with - # `prefix` match set to `/dir`. Defaults to `false`. 
Note that slash merging - # is not part of `HTTP spec `_ and is - # provided for convenience. - merge_slashes: bool = betterproto.bool_field(33) - # The configuration of the request ID extension. This includes operations - # such as generation, validation, and associated tracing operations. If not - # set, Envoy uses the default UUID-based behavior: 1. Request ID is - # propagated using *x-request-id* header. 2. Request ID is a universally - # unique identifier (UUID). 3. Tracing decision (sampled, forced, etc) is set - # in 14th byte of the UUID. - request_id_extension: "RequestIdExtension" = betterproto.message_field(36) - - def __post_init__(self) -> None: - super().__post_init__() - if self.idle_timeout: - warnings.warn( - "HttpConnectionManager.idle_timeout is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class HttpConnectionManagerTracing(betterproto.Message): - """[#next-free-field: 10]""" - - # The span name will be derived from this field. If :ref:`traffic_direction - # ` is specified on the parent - # listener, then it is used instead of this field. .. attention:: This field - # has been deprecated in favor of `traffic_direction`. - operation_name: "HttpConnectionManagerTracingOperationName" = ( - betterproto.enum_field(1) - ) - # A list of header names used to create tags for the active span. The header - # name is used to populate the tag name, and the header value is used to - # populate the tag value. The tag is created if the specified header name is - # present in the request's headers. .. attention:: This field has been - # deprecated in favor of :ref:`custom_tags - # `. - request_headers_for_tags: List[str] = betterproto.string_field(2) - # Target percentage of requests managed by this HTTP connection manager that - # will be force traced if the :ref:`x-client-trace-id - # ` header is set. This field - # is a direct analog for the runtime variable 'tracing.client_sampling' in - # the :ref:`HTTP Connection Manager `. Default: - # 100% - client_sampling: "_____type__.Percent" = betterproto.message_field(3) - # Target percentage of requests managed by this HTTP connection manager that - # will be randomly selected for trace generation, if not requested by the - # client or not forced. This field is a direct analog for the runtime - # variable 'tracing.random_sampling' in the :ref:`HTTP Connection Manager - # `. Default: 100% - random_sampling: "_____type__.Percent" = betterproto.message_field(4) - # Target percentage of requests managed by this HTTP connection manager that - # will be traced after all other sampling checks have been applied (client- - # directed, force tracing, random sampling). This field functions as an upper - # limit on the total configured sampling rate. For instance, setting - # client_sampling to 100% but overall_sampling to 1% will result in only 1% - # of client requests with the appropriate headers to be force traced. This - # field is a direct analog for the runtime variable 'tracing.global_enabled' - # in the :ref:`HTTP Connection Manager `. - # Default: 100% - overall_sampling: "_____type__.Percent" = betterproto.message_field(5) - # Whether to annotate spans with additional data. If true, spans will include - # logs for stream events. - verbose: bool = betterproto.bool_field(6) - # Maximum length of the request path to extract and include in the HttpUrl - # tag. Used to truncate lengthy request paths to meet the needs of a tracing - # backend. 
Default: 256 - max_path_tag_length: Optional[int] = betterproto.message_field( - 7, wraps=betterproto.TYPE_UINT32 - ) - # A list of custom tags with unique tag name to create tags for the active - # span. - custom_tags: List["_____type_tracing_v2__.CustomTag"] = betterproto.message_field(8) - # Configuration for an external tracing provider. If not specified, no - # tracing will be performed. .. attention:: Please be aware that - # *envoy.tracers.opencensus* provider can only be configured once in Envoy - # lifetime. Any attempts to reconfigure it or to use different - # configurations for different HCM filters will be rejected. Such a - # constraint is inherent to OpenCensus itself. It cannot be overcome without - # changes on OpenCensus side. - provider: "____trace_v2__.TracingHttp" = betterproto.message_field(9) - - def __post_init__(self) -> None: - super().__post_init__() - if self.operation_name: - warnings.warn( - "HttpConnectionManagerTracing.operation_name is deprecated", - DeprecationWarning, - ) - if self.request_headers_for_tags: - warnings.warn( - "HttpConnectionManagerTracing.request_headers_for_tags is deprecated", - DeprecationWarning, - ) - - -@dataclass(eq=False, repr=False) -class HttpConnectionManagerInternalAddressConfig(betterproto.Message): - # Whether unix socket addresses should be considered internal. - unix_sockets: bool = betterproto.bool_field(1) - - -@dataclass(eq=False, repr=False) -class HttpConnectionManagerSetCurrentClientCertDetails(betterproto.Message): - """[#next-free-field: 7]""" - - # Whether to forward the subject of the client cert. Defaults to false. - subject: Optional[bool] = betterproto.message_field(1, wraps=betterproto.TYPE_BOOL) - # Whether to forward the entire client cert in URL encoded PEM format. This - # will appear in the XFCC header comma separated from other values with the - # value Cert="PEM". Defaults to false. - cert: bool = betterproto.bool_field(3) - # Whether to forward the entire client cert chain (including the leaf cert) - # in URL encoded PEM format. This will appear in the XFCC header comma - # separated from other values with the value Chain="PEM". Defaults to false. - chain: bool = betterproto.bool_field(6) - # Whether to forward the DNS type Subject Alternative Names of the client - # cert. Defaults to false. - dns: bool = betterproto.bool_field(4) - # Whether to forward the URI type Subject Alternative Name of the client - # cert. Defaults to false. - uri: bool = betterproto.bool_field(5) - - -@dataclass(eq=False, repr=False) -class HttpConnectionManagerUpgradeConfig(betterproto.Message): - """ - The configuration for HTTP upgrades. For each upgrade type desired, an - UpgradeConfig must be added. .. warning:: The current implementation of - upgrade headers does not handle multi-valued upgrade headers. Support - for multi-valued headers may be added in the future if needed. .. - warning:: The current implementation of upgrade headers does not work - with HTTP/2 upstreams. - """ - - # The case-insensitive name of this upgrade, e.g. "websocket". For each - # upgrade type present in upgrade_configs, requests with Upgrade: - # [upgrade_type] will be proxied upstream. - upgrade_type: str = betterproto.string_field(1) - # If present, this represents the filter chain which will be created for this - # type of upgrade. If no filters are present, the filter chain for HTTP - # connections will be used for this upgrade type. 
- filters: List["HttpFilter"] = betterproto.message_field(2) - # Determines if upgrades are enabled or disabled by default. Defaults to - # true. This can be overridden on a per-route basis with :ref:`cluster - # ` as documented in the - # :ref:`upgrade documentation `. - enabled: Optional[bool] = betterproto.message_field(3, wraps=betterproto.TYPE_BOOL) - - -@dataclass(eq=False, repr=False) -class Rds(betterproto.Message): - # Configuration source specifier for RDS. - config_source: "_____api_v2_core__.ConfigSource" = betterproto.message_field(1) - # The name of the route configuration. This name will be passed to the RDS - # API. This allows an Envoy configuration with multiple HTTP listeners (and - # associated HTTP connection manager filters) to use different route - # configurations. - route_config_name: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class ScopedRouteConfigurationsList(betterproto.Message): - """ - This message is used to work around the limitations with 'oneof' and - repeated fields. - """ - - scoped_route_configurations: List[ - "_____api_v2__.ScopedRouteConfiguration" - ] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ScopedRoutes(betterproto.Message): - """[#next-free-field: 6]""" - - # The name assigned to the scoped routing configuration. - name: str = betterproto.string_field(1) - # The algorithm to use for constructing a scope key for each request. - scope_key_builder: "ScopedRoutesScopeKeyBuilder" = betterproto.message_field(2) - # Configuration source specifier for RDS. This config source is used to - # subscribe to RouteConfiguration resources specified in - # ScopedRouteConfiguration messages. - rds_config_source: "_____api_v2_core__.ConfigSource" = betterproto.message_field(3) - # The set of routing scopes corresponding to the HCM. A scope is assigned to - # a request by matching a key constructed from the request's attributes - # according to the algorithm specified by the :ref:`ScopeKeyBuilder` in this message. - scoped_route_configurations_list: "ScopedRouteConfigurationsList" = ( - betterproto.message_field(4, group="config_specifier") - ) - # The set of routing scopes associated with the HCM will be dynamically - # loaded via the SRDS API. A scope is assigned to a request by matching a key - # constructed from the request's attributes according to the algorithm - # specified by the :ref:`ScopeKeyBuilder` in this message. - scoped_rds: "ScopedRds" = betterproto.message_field(5, group="config_specifier") - - -@dataclass(eq=False, repr=False) -class ScopedRoutesScopeKeyBuilder(betterproto.Message): - """ - Specifies the mechanism for constructing "scope keys" based on HTTP request - attributes. These keys are matched against a set of - :ref:`Key` objects assembled - from - :ref:`ScopedRouteConfiguration` - messages distributed via SRDS (the Scoped Route Discovery Service) or - assigned statically via :ref:`scoped_route_configurations_list`. Upon receiving a request's headers, the Router - will build a key using the algorithm specified by this message. This key - will be used to look up the routing table (i.e., the - :ref:`RouteConfiguration`) to use for the - request. - """ - - # The final(built) scope key consists of the ordered union of these - # fragments, which are compared in order with the fragments of a - # :ref:`ScopedRouteConfiguration`. A - # missing fragment during comparison will make the key invalid, i.e., the - # computed key doesn't match any key. 
- fragments: List[ - "ScopedRoutesScopeKeyBuilderFragmentBuilder" - ] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ScopedRoutesScopeKeyBuilderFragmentBuilder(betterproto.Message): - """ - Specifies the mechanism for constructing key fragments which are composed - into scope keys. - """ - - # Specifies how a header field's value should be extracted. - header_value_extractor: "ScopedRoutesScopeKeyBuilderFragmentBuilderHeaderValueExtractor" = betterproto.message_field( - 1, group="type" - ) - - -@dataclass(eq=False, repr=False) -class ScopedRoutesScopeKeyBuilderFragmentBuilderHeaderValueExtractor( - betterproto.Message -): - """ - Specifies how the value of a header should be extracted. The following - example maps the structure of a header to the fields in this message. .. - code:: <0> <1> <-- index X-Header: a=b;c=d | - || | | || \----> | || | - |\----> | | | \----> - | \----> Each 'a=b' key-value pair - constitutes an 'element' of the header field. - """ - - # The name of the header field to extract the value from. .. note:: If the - # header appears multiple times only the first value is used. - name: str = betterproto.string_field(1) - # The element separator (e.g., ';' separates 'a;b;c;d'). Default: empty - # string. This causes the entirety of the header field to be extracted. If - # this field is set to an empty string and 'index' is used in the oneof - # below, 'index' must be set to 0. - element_separator: str = betterproto.string_field(2) - # Specifies the zero based index of the element to extract. Note Envoy - # concatenates multiple values of the same header key into a comma separated - # string, the splitting always happens after the concatenation. - index: int = betterproto.uint32_field(3, group="extract_type") - # Specifies the key value pair to extract the value from. - element: "ScopedRoutesScopeKeyBuilderFragmentBuilderHeaderValueExtractorKvElement" = betterproto.message_field( - 4, group="extract_type" - ) - - -@dataclass(eq=False, repr=False) -class ScopedRoutesScopeKeyBuilderFragmentBuilderHeaderValueExtractorKvElement( - betterproto.Message -): - """Specifies a header field's key value pair to match on.""" - - # The separator between key and value (e.g., '=' separates 'k=v;...'). If an - # element is an empty string, the element is ignored. If an element contains - # no separator, the whole element is parsed as key and the fragment value is - # an empty string. If there are multiple values for a matched key, the first - # value is returned. - separator: str = betterproto.string_field(1) - # The key to match on. - key: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class ScopedRds(betterproto.Message): - # Configuration source specifier for scoped RDS. - scoped_rds_config_source: "_____api_v2_core__.ConfigSource" = ( - betterproto.message_field(1) - ) - - -@dataclass(eq=False, repr=False) -class HttpFilter(betterproto.Message): - # The name of the filter to instantiate. The name must match a - # :ref:`supported filter `. 
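The scope-key construction described above maps onto the generated fragment-builder dataclasses; a hedged sketch, again assuming the pre-1.24.0 package, that extracts the value paired with key "a" from a header such as "X-Header: a=b;c=d":

from envoy_data_plane.envoy.config.filter.network.http_connection_manager.v2 import (
    ScopedRoutesScopeKeyBuilder,
    ScopedRoutesScopeKeyBuilderFragmentBuilder,
    ScopedRoutesScopeKeyBuilderFragmentBuilderHeaderValueExtractor,
    ScopedRoutesScopeKeyBuilderFragmentBuilderHeaderValueExtractorKvElement,
)

# One fragment: split the "X-Header" value on ';' and take the value of the "a=b" element.
key_builder = ScopedRoutesScopeKeyBuilder(
    fragments=[
        ScopedRoutesScopeKeyBuilderFragmentBuilder(
            header_value_extractor=ScopedRoutesScopeKeyBuilderFragmentBuilderHeaderValueExtractor(
                name="X-Header",
                element_separator=";",
                element=ScopedRoutesScopeKeyBuilderFragmentBuilderHeaderValueExtractorKvElement(
                    separator="=",
                    key="a",
                ),
            )
        )
    ]
)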
- name: str = betterproto.string_field(1) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 4, group="config_type" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn("HttpFilter.config is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class RequestIdExtension(betterproto.Message): - # Request ID extension specific configuration. - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(1) - - -from ...... import type as _____type__ -from ......api import v2 as _____api_v2__ -from ......api.v2 import core as _____api_v2_core__ -from ......type.tracing import v2 as _____type_tracing_v2__ -from .....trace import v2 as ____trace_v2__ -from ....accesslog import v2 as ___accesslog_v2__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/filter/network/kafka_broker/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/kafka_broker/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/kafka_broker/v2alpha1/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/kafka_broker/v2alpha1/__init__.py deleted file mode 100644 index a9ef337..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/kafka_broker/v2alpha1/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class KafkaBroker(betterproto.Message): - # The prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(1) diff --git a/src/envoy_data_plane/envoy/config/filter/network/local_rate_limit/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/local_rate_limit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/local_rate_limit/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/local_rate_limit/v2alpha/__init__.py deleted file mode 100644 index 5f58168..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/local_rate_limit/v2alpha/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class LocalRateLimit(betterproto.Message): - # The prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(1) - # The token bucket configuration to use for rate limiting connections that - # are processed by the filter's filter chain. Each incoming connection - # processed by the filter consumes a single token. If the token is available, - # the connection will be allowed. If no tokens are available, the connection - # will be immediately closed. .. note:: In the current implementation each - # filter and filter chain has an independent rate limit. .. 
note:: In the - # current implementation the token bucket's :ref:`fill_interval - # ` must be >= 50ms to avoid - # too aggressive refills. - token_bucket: "_____type__.TokenBucket" = betterproto.message_field(2) - # Runtime flag that controls whether the filter is enabled or not. If not - # specified, defaults to enabled. - runtime_enabled: "_____api_v2_core__.RuntimeFeatureFlag" = ( - betterproto.message_field(3) - ) - - -from ...... import type as _____type__ -from ......api.v2 import core as _____api_v2_core__ diff --git a/src/envoy_data_plane/envoy/config/filter/network/mongo_proxy/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/mongo_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/mongo_proxy/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/mongo_proxy/v2/__init__.py deleted file mode 100644 index 8e5dbad..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/mongo_proxy/v2/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class MongoProxy(betterproto.Message): - # The human readable prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(1) - # The optional path to use for writing Mongo access logs. If not access log - # path is specified no access logs will be written. Note that access log is - # also gated :ref:`runtime `. - access_log: str = betterproto.string_field(2) - # Inject a fixed delay before proxying a Mongo operation. Delays are applied - # to the following MongoDB operations: Query, Insert, GetMore, and - # KillCursors. Once an active delay is in progress, all incoming data up - # until the timer event fires will be a part of the delay. - delay: "___fault_v2__.FaultDelay" = betterproto.message_field(3) - # Flag to specify whether :ref:`dynamic metadata - # ` should be emitted. - # Defaults to false. - emit_dynamic_metadata: bool = betterproto.bool_field(4) - - -from ....fault import v2 as ___fault_v2__ diff --git a/src/envoy_data_plane/envoy/config/filter/network/mysql_proxy/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/mysql_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/mysql_proxy/v1alpha1/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/mysql_proxy/v1alpha1/__init__.py deleted file mode 100644 index fe166c4..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/mysql_proxy/v1alpha1/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class MySqlProxy(betterproto.Message): - # The human readable prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(1) - # [#not-implemented-hide:] The optional path to use for writing MySQL access - # logs. If the access log field is empty, access logs will not be written. 
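A hedged sketch of the local rate limit filter described above, assuming the pre-1.24.0 package; TokenBucket and its max_tokens/fill_interval fields are assumed from the v2 envoy.type API, and the values are placeholders:

from datetime import timedelta

from envoy_data_plane.envoy.config.filter.network.local_rate_limit.v2alpha import LocalRateLimit
from envoy_data_plane.envoy.type import TokenBucket

# One token is consumed per incoming connection; fill_interval must be >= 50ms.
limiter = LocalRateLimit(
    stat_prefix="tcp_rate_limiter",
    token_bucket=TokenBucket(
        max_tokens=100,
        fill_interval=timedelta(milliseconds=100),
    ),
)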
- access_log: str = betterproto.string_field(2) diff --git a/src/envoy_data_plane/envoy/config/filter/network/rate_limit/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/rate_limit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/rate_limit/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/rate_limit/v2/__init__.py deleted file mode 100644 index 933a124..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/rate_limit/v2/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/rate_limit/v2/rate_limit.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RateLimit(betterproto.Message): - """[#next-free-field: 7]""" - - # The prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(1) - # The rate limit domain to use in the rate limit service request. - domain: str = betterproto.string_field(2) - # The rate limit descriptor list to use in the rate limit service request. - descriptors: List[ - "_____api_v2_ratelimit__.RateLimitDescriptor" - ] = betterproto.message_field(3) - # The timeout in milliseconds for the rate limit service RPC. If not set, - # this defaults to 20ms. - timeout: timedelta = betterproto.message_field(4) - # The filter's behaviour in case the rate limiting service does not respond - # back. When it is set to true, Envoy will not allow traffic in case of - # communication failure between rate limiting service and the proxy. Defaults - # to false. - failure_mode_deny: bool = betterproto.bool_field(5) - # Configuration for an external rate limit service provider. If not - # specified, any calls to the rate limit service will immediately return - # success. - rate_limit_service: "____ratelimit_v2__.RateLimitServiceConfig" = ( - betterproto.message_field(6) - ) - - -from ......api.v2 import ratelimit as _____api_v2_ratelimit__ -from .....ratelimit import v2 as ____ratelimit_v2__ diff --git a/src/envoy_data_plane/envoy/config/filter/network/rbac/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/rbac/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/rbac/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/rbac/v2/__init__.py deleted file mode 100644 index 56d391a..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/rbac/v2/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/rbac/v2/rbac.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class RbacEnforcementType(betterproto.Enum): - ONE_TIME_ON_FIRST_BYTE = 0 - CONTINUOUS = 1 - - -@dataclass(eq=False, repr=False) -class Rbac(betterproto.Message): - """ - RBAC network filter config. Header should not be used in rules/shadow_rules - in RBAC network filter as this information is only available in :ref:`RBAC - http filter `. - """ - - # Specify the RBAC rules to be applied globally. If absent, no enforcing RBAC - # policy will be applied. 
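For the network rate limit filter above, a minimal sketch assuming the pre-1.24.0 package; the domain and stat prefix are placeholders, and descriptors would carry envoy.api.v2.ratelimit.RateLimitDescriptor entries:

from datetime import timedelta

from envoy_data_plane.envoy.config.filter.network.rate_limit.v2 import RateLimit

# Without an explicit timeout, the rate limit service RPC defaults to 20ms.
limiter = RateLimit(
    stat_prefix="tcp_ratelimit",
    domain="edge_proxies",
    timeout=timedelta(milliseconds=20),
    failure_mode_deny=False,
)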
- rules: "____rbac_v2__.Rbac" = betterproto.message_field(1) - # Shadow rules are not enforced by the filter but will emit stats and logs - # and can be used for rule testing. If absent, no shadow RBAC policy will be - # applied. - shadow_rules: "____rbac_v2__.Rbac" = betterproto.message_field(2) - # The prefix to use when emitting statistics. - stat_prefix: str = betterproto.string_field(3) - # RBAC enforcement strategy. By default RBAC will be enforced only once when - # the first byte of data arrives from the downstream. When used in - # conjunction with filters that emit dynamic metadata after decoding every - # payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to CONTINUOUS - # to enforce RBAC policies on every message boundary. - enforcement_type: "RbacEnforcementType" = betterproto.enum_field(4) - - -from .....rbac import v2 as ____rbac_v2__ diff --git a/src/envoy_data_plane/envoy/config/filter/network/redis_proxy/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/redis_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/redis_proxy/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/redis_proxy/v2/__init__.py deleted file mode 100644 index e5274bd..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/redis_proxy/v2/__init__.py +++ /dev/null @@ -1,220 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class RedisProxyConnPoolSettingsReadPolicy(betterproto.Enum): - MASTER = 0 - PREFER_MASTER = 1 - REPLICA = 2 - PREFER_REPLICA = 3 - ANY = 4 - - -@dataclass(eq=False, repr=False) -class RedisProxy(betterproto.Message): - """[#next-free-field: 7]""" - - # The prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(1) - # Name of cluster from cluster manager. See the :ref:`configuration section - # ` of the architecture overview for - # recommendations on configuring the backing cluster. .. attention:: This - # field is deprecated. Use a :ref:`catch_all route` - # instead. - cluster: str = betterproto.string_field(2) - # Network settings for the connection pool to the upstream clusters. - settings: "RedisProxyConnPoolSettings" = betterproto.message_field(3) - # Indicates that latency stat should be computed in microseconds. By default - # it is computed in milliseconds. - latency_in_micros: bool = betterproto.bool_field(4) - # List of **unique** prefixes used to separate keys from different workloads - # to different clusters. Envoy will always favor the longest match first in - # case of overlap. A catch-all cluster can be used to forward commands when - # there is no match. Time complexity of the lookups are in O(min(longest key - # prefix, key length)). Example: .. code-block:: yaml prefix_routes: - # routes: - prefix: "ab" cluster: "cluster_a" - - # prefix: "abc" cluster: "cluster_b" When using the above routes, - # the following prefixes would be sent to: * ``get abc:users`` would retrieve - # the key 'abc:users' from cluster_b. * ``get ab:users`` would retrieve the - # key 'ab:users' from cluster_a. * ``get z:users`` would return a - # NoUpstreamHost error. 
A :ref:`catch-all route` would - # have retrieved the key from that cluster instead. See the - # :ref:`configuration section ` of the - # architecture overview for recommendations on configuring the backing - # clusters. - prefix_routes: "RedisProxyPrefixRoutes" = betterproto.message_field(5) - # Authenticate Redis client connections locally by forcing downstream clients - # to issue a `Redis AUTH command `_ with this - # password before enabling any other command. If an AUTH command's password - # matches this password, an "OK" response will be returned to the client. If - # the AUTH command password does not match this password, then an "ERR - # invalid password" error will be returned. If any other command is received - # before AUTH when this password is set, then a "NOAUTH Authentication - # required." error response will be sent to the client. If an AUTH command is - # received when the password is not set, then an "ERR Client sent AUTH, but - # no password is set" error will be returned. - downstream_auth_password: "_____api_v2_core__.DataSource" = ( - betterproto.message_field(6) - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.cluster: - warnings.warn("RedisProxy.cluster is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class RedisProxyConnPoolSettings(betterproto.Message): - """Redis connection pool settings. [#next-free-field: 9]""" - - # Per-operation timeout in milliseconds. The timer starts when the first - # command of a pipeline is written to the backend connection. Each response - # received from Redis resets the timer since it signifies that the next - # command is being processed by the backend. The only exception to this - # behavior is when a connection to a backend is not yet established. In that - # case, the connect timeout on the cluster will govern the timeout until the - # connection is ready. - op_timeout: timedelta = betterproto.message_field(1) - # Use hash tagging on every redis key to guarantee that keys with the same - # hash tag will be forwarded to the same upstream. The hash key used for - # determining the upstream in a consistent hash ring configuration will be - # computed from the hash tagged key instead of the whole key. The algorithm - # used to compute the hash tag is identical to the `redis-cluster - # implementation `_. - # Examples: * '{user1000}.following' and '{user1000}.followers' **will** be - # sent to the same upstream * '{user1000}.following' and - # '{user1001}.following' **might** be sent to the same upstream - enable_hashtagging: bool = betterproto.bool_field(2) - # Accept `moved and ask redirection `_ errors from upstream redis servers, and - # retry commands to the specified target server. The target server does not - # need to be known to the cluster manager. If the command cannot be - # redirected, then the original error is passed downstream unchanged. By - # default, this support is not enabled. - enable_redirection: bool = betterproto.bool_field(3) - # Maximum size of encoded request buffer before flush is triggered and - # encoded requests are sent upstream. If this is unset, the buffer flushes - # whenever it receives data and performs no batching. This feature makes it - # possible for multiple clients to send requests to Envoy and have them - # batched- for example if one is running several worker processes, each with - # its own Redis connection. There is no benefit to using this with a single - # downstream process. Recommended size (if enabled) is 1024 bytes. 
- max_buffer_size_before_flush: int = betterproto.uint32_field(4) - # The encoded request buffer is flushed N milliseconds after the first - # request has been encoded, unless the buffer size has already exceeded - # `max_buffer_size_before_flush`. If `max_buffer_size_before_flush` is not - # set, this flush timer is not used. Otherwise, the timer should be set - # according to the number of clients, overall request rate and desired - # maximum latency for a single command. For example, if there are many - # requests being batched together at a high rate, the buffer will likely be - # filled before the timer fires. Alternatively, if the request rate is lower - # the buffer will not be filled as often before the timer fires. If - # `max_buffer_size_before_flush` is set, but `buffer_flush_timeout` is not, - # the latter defaults to 3ms. - buffer_flush_timeout: timedelta = betterproto.message_field(5) - # `max_upstream_unknown_connections` controls how many upstream connections - # to unknown hosts can be created at any given time by any given worker - # thread (see `enable_redirection` for more details). If the host is unknown - # and a connection cannot be created due to enforcing this limit, then - # redirection will fail and the original redirection error will be passed - # downstream unchanged. This limit defaults to 100. - max_upstream_unknown_connections: Optional[int] = betterproto.message_field( - 6, wraps=betterproto.TYPE_UINT32 - ) - # Enable per-command statistics per upstream cluster, in addition to the - # filter level aggregate count. - enable_command_stats: bool = betterproto.bool_field(8) - # Read policy. The default is to read from the primary. - read_policy: "RedisProxyConnPoolSettingsReadPolicy" = betterproto.enum_field(7) - - -@dataclass(eq=False, repr=False) -class RedisProxyPrefixRoutes(betterproto.Message): - # List of prefix routes. - routes: List["RedisProxyPrefixRoutesRoute"] = betterproto.message_field(1) - # Indicates that prefix matching should be case insensitive. - case_insensitive: bool = betterproto.bool_field(2) - # Optional catch-all route to forward commands that doesn't match any of the - # routes. The catch-all route becomes required when no routes are specified. - # .. attention:: This field is deprecated. Use a :ref:`catch_all route` instead. - catch_all_cluster: str = betterproto.string_field(3) - # Optional catch-all route to forward commands that doesn't match any of the - # routes. The catch-all route becomes required when no routes are specified. - catch_all_route: "RedisProxyPrefixRoutesRoute" = betterproto.message_field(4) - - def __post_init__(self) -> None: - super().__post_init__() - if self.catch_all_cluster: - warnings.warn( - "RedisProxyPrefixRoutes.catch_all_cluster is deprecated", - DeprecationWarning, - ) - - -@dataclass(eq=False, repr=False) -class RedisProxyPrefixRoutesRoute(betterproto.Message): - # String prefix that must match the beginning of the keys. Envoy will always - # favor the longest match. - prefix: str = betterproto.string_field(1) - # Indicates if the prefix needs to be removed from the key when forwarded. - remove_prefix: bool = betterproto.bool_field(2) - # Upstream cluster to forward the command to. - cluster: str = betterproto.string_field(3) - # Indicates that the route has a request mirroring policy. 
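The prefix routing behaviour documented above (longest prefix wins, with an optional catch-all) can be expressed with the generated dataclasses; a sketch assuming the pre-1.24.0 package, reusing the cluster names from the YAML example plus a placeholder fallback cluster:

from datetime import timedelta

from envoy_data_plane.envoy.config.filter.network.redis_proxy.v2 import (
    RedisProxy,
    RedisProxyConnPoolSettings,
    RedisProxyPrefixRoutes,
    RedisProxyPrefixRoutesRoute,
)

# Keys prefixed "abc" go to cluster_b, other "ab" keys to cluster_a, the rest to the catch-all.
proxy = RedisProxy(
    stat_prefix="redis_ingress",
    settings=RedisProxyConnPoolSettings(op_timeout=timedelta(milliseconds=5)),
    prefix_routes=RedisProxyPrefixRoutes(
        routes=[
            RedisProxyPrefixRoutesRoute(prefix="ab", cluster="cluster_a"),
            RedisProxyPrefixRoutesRoute(prefix="abc", cluster="cluster_b"),
        ],
        catch_all_route=RedisProxyPrefixRoutesRoute(cluster="cluster_fallback"),
    ),
)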
- request_mirror_policy: List[ - "RedisProxyPrefixRoutesRouteRequestMirrorPolicy" - ] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class RedisProxyPrefixRoutesRouteRequestMirrorPolicy(betterproto.Message): - """ - The router is capable of shadowing traffic from one cluster to another. The - current implementation is "fire and forget," meaning Envoy will not wait - for the shadow cluster to respond before returning the response from the - primary cluster. All normal statistics are collected for the shadow cluster - making this feature useful for testing. - """ - - # Specifies the cluster that requests will be mirrored to. The cluster must - # exist in the cluster manager configuration. - cluster: str = betterproto.string_field(1) - # If not specified or the runtime key is not present, all requests to the - # target cluster will be mirrored. If specified, Envoy will lookup the - # runtime key to get the percentage of requests to the mirror. - runtime_fraction: "_____api_v2_core__.RuntimeFractionalPercent" = ( - betterproto.message_field(2) - ) - # Set this to TRUE to only mirror write commands, this is effectively - # replicating the writes in a "fire and forget" manner. - exclude_read_commands: bool = betterproto.bool_field(3) - - -@dataclass(eq=False, repr=False) -class RedisProtocolOptions(betterproto.Message): - """ - RedisProtocolOptions specifies Redis upstream protocol options. This object - is used in :ref:`typed_extension_protocol_options`, keyed by the name - `envoy.filters.network.redis_proxy`. - """ - - # Upstream server password as defined by the `requirepass` directive - # `_ in the server's configuration file. - auth_password: "_____api_v2_core__.DataSource" = betterproto.message_field(1) - - -from ......api.v2 import core as _____api_v2_core__ diff --git a/src/envoy_data_plane/envoy/config/filter/network/sni_cluster/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/sni_cluster/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/sni_cluster/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/sni_cluster/v2/__init__.py deleted file mode 100644 index bbdf16b..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/sni_cluster/v2/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class SniCluster(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/config/filter/network/tcp_proxy/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/tcp_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/tcp_proxy/v2/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/tcp_proxy/v2/__init__.py deleted file mode 100644 index 1f9d12d..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/tcp_proxy/v2/__init__.py +++ /dev/null @@ -1,182 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class TcpProxy(betterproto.Message): - """[#next-free-field: 13]""" - - # The prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(1) - # The upstream cluster to connect to. - cluster: str = betterproto.string_field(2, group="cluster_specifier") - # Multiple upstream clusters can be specified for a given route. The request - # is routed to one of the upstream clusters based on weights assigned to each - # cluster. - weighted_clusters: "TcpProxyWeightedCluster" = betterproto.message_field( - 10, group="cluster_specifier" - ) - # Optional endpoint metadata match criteria. Only endpoints in the upstream - # cluster with metadata matching that set in metadata_match will be - # considered. The filter name should be specified as *envoy.lb*. - metadata_match: "_____api_v2_core__.Metadata" = betterproto.message_field(9) - # The idle timeout for connections managed by the TCP proxy filter. The idle - # timeout is defined as the period in which there are no bytes sent or - # received on either the upstream or downstream connection. If not set, the - # default idle timeout is 1 hour. If set to 0s, the timeout will be disabled. - # .. warning:: Disabling this timeout has a highly likelihood of yielding - # connection leaks due to lost TCP FIN packets, etc. - idle_timeout: timedelta = betterproto.message_field(8) - # [#not-implemented-hide:] The idle timeout for connections managed by the - # TCP proxy filter. The idle timeout is defined as the period in which there - # is no active traffic. If not set, there is no idle timeout. When the idle - # timeout is reached the connection will be closed. The distinction between - # downstream_idle_timeout/upstream_idle_timeout provides a means to set - # timeout based on the last byte sent on the downstream/upstream connection. - downstream_idle_timeout: timedelta = betterproto.message_field(3) - # [#not-implemented-hide:] - upstream_idle_timeout: timedelta = betterproto.message_field(4) - # Configuration for :ref:`access logs ` emitted by - # the this tcp_proxy. - access_log: List["___accesslog_v2__.AccessLog"] = betterproto.message_field(5) - # [#not-implemented-hide:] Deprecated. - deprecated_v1: "TcpProxyDeprecatedV1" = betterproto.message_field(6) - # The maximum number of unsuccessful connection attempts that will be made - # before giving up. If the parameter is not specified, 1 connection attempt - # will be made. - max_connect_attempts: Optional[int] = betterproto.message_field( - 7, wraps=betterproto.TYPE_UINT32 - ) - # Optional configuration for TCP proxy hash policy. If hash_policy is not - # set, the hash-based load balancing algorithms will select a host randomly. - # Currently the number of hash policies is limited to 1. - hash_policy: List["_____type__.HashPolicy"] = betterproto.message_field(11) - # [#not-implemented-hide:] feature in progress If set, this configures - # tunneling, e.g. configuration options to tunnel multiple TCP payloads over - # a shared HTTP/2 tunnel. If this message is absent, the payload will be - # proxied upstream as per usual. 
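A hedged sketch of the TCP proxy filter above, assuming the pre-1.24.0 package; the cluster name and timeout values are placeholders:

from datetime import timedelta

from envoy_data_plane.envoy.config.filter.network.tcp_proxy.v2 import TcpProxy

# cluster and weighted_clusters form the cluster_specifier oneof; set only one of them.
tcp = TcpProxy(
    stat_prefix="tcp_egress",
    cluster="backend",
    idle_timeout=timedelta(minutes=10),
    max_connect_attempts=3,
)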
- tunneling_config: "TcpProxyTunnelingConfig" = betterproto.message_field(12) - - def __post_init__(self) -> None: - super().__post_init__() - if self.deprecated_v1: - warnings.warn("TcpProxy.deprecated_v1 is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class TcpProxyDeprecatedV1(betterproto.Message): - """ - [#not-implemented-hide:] Deprecated. TCP Proxy filter configuration using - V1 format. - """ - - # The route table for the filter. All filter instances must have a route - # table, even if it is empty. - routes: List["TcpProxyDeprecatedV1TcpRoute"] = betterproto.message_field(1) - - def __post_init__(self) -> None: - warnings.warn("TcpProxyDeprecatedV1 is deprecated", DeprecationWarning) - super().__post_init__() - - -@dataclass(eq=False, repr=False) -class TcpProxyDeprecatedV1TcpRoute(betterproto.Message): - """ - A TCP proxy route consists of a set of optional L4 criteria and the name of - a cluster. If a downstream connection matches all the specified criteria, - the cluster in the route is used for the corresponding upstream connection. - Routes are tried in the order specified until a match is found. If no match - is found, the connection is closed. A route with no criteria is valid and - always produces a match. [#next-free-field: 6] - """ - - # The cluster to connect to when a the downstream network connection matches - # the specified criteria. - cluster: str = betterproto.string_field(1) - # An optional list of IP address subnets in the form “ip_address/xx”. The - # criteria is satisfied if the destination IP address of the downstream - # connection is contained in at least one of the specified subnets. If the - # parameter is not specified or the list is empty, the destination IP address - # is ignored. The destination IP address of the downstream connection might - # be different from the addresses on which the proxy is listening if the - # connection has been redirected. - destination_ip_list: List[ - "_____api_v2_core__.CidrRange" - ] = betterproto.message_field(2) - # An optional string containing a comma-separated list of port numbers or - # ranges. The criteria is satisfied if the destination port of the downstream - # connection is contained in at least one of the specified ranges. If the - # parameter is not specified, the destination port is ignored. The - # destination port address of the downstream connection might be different - # from the port on which the proxy is listening if the connection has been - # redirected. - destination_ports: str = betterproto.string_field(3) - # An optional list of IP address subnets in the form “ip_address/xx”. The - # criteria is satisfied if the source IP address of the downstream connection - # is contained in at least one of the specified subnets. If the parameter is - # not specified or the list is empty, the source IP address is ignored. - source_ip_list: List["_____api_v2_core__.CidrRange"] = betterproto.message_field(4) - # An optional string containing a comma-separated list of port numbers or - # ranges. The criteria is satisfied if the source port of the downstream - # connection is contained in at least one of the specified ranges. If the - # parameter is not specified, the source port is ignored. - source_ports: str = betterproto.string_field(5) - - -@dataclass(eq=False, repr=False) -class TcpProxyWeightedCluster(betterproto.Message): - """ - Allows for specification of multiple upstream clusters along with weights - that indicate the percentage of traffic to be forwarded to each cluster. 
- The router selects an upstream cluster based on these weights. - """ - - # Specifies one or more upstream clusters associated with the route. - clusters: List["TcpProxyWeightedClusterClusterWeight"] = betterproto.message_field( - 1 - ) - - -@dataclass(eq=False, repr=False) -class TcpProxyWeightedClusterClusterWeight(betterproto.Message): - # Name of the upstream cluster. - name: str = betterproto.string_field(1) - # When a request matches the route, the choice of an upstream cluster is - # determined by its weight. The sum of weights across all entries in the - # clusters array determines the total weight. - weight: int = betterproto.uint32_field(2) - # Optional endpoint metadata match criteria used by the subset load balancer. - # Only endpoints in the upstream cluster with metadata matching what is set - # in this field will be considered for load balancing. Note that this will be - # merged with what's provided in :ref:`TcpProxy.metadata_match `, with - # values here taking precedence. The filter name should be specified as - # *envoy.lb*. - metadata_match: "_____api_v2_core__.Metadata" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class TcpProxyTunnelingConfig(betterproto.Message): - """ - Configuration for tunneling TCP over other transports or application - layers. Currently, only HTTP/2 is supported. When other options exist, - HTTP/2 will remain the default. - """ - - # The hostname to send in the synthesized CONNECT headers to the upstream - # proxy. - hostname: str = betterproto.string_field(1) - - -from ...... import type as _____type__ -from ......api.v2 import core as _____api_v2_core__ -from ....accesslog import v2 as ___accesslog_v2__ diff --git a/src/envoy_data_plane/envoy/config/filter/network/thrift_proxy/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/thrift_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/thrift_proxy/v2alpha1/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/thrift_proxy/v2alpha1/__init__.py deleted file mode 100644 index 95927c3..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/thrift_proxy/v2alpha1/__init__.py +++ /dev/null @@ -1,233 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto, envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class TransportType(betterproto.Enum): - """Thrift transport types supported by Envoy.""" - - # For downstream connections, the Thrift proxy will attempt to determine - # which transport to use. For upstream connections, the Thrift proxy will use - # same transport as the downstream connection. - AUTO_TRANSPORT = 0 - # The Thrift proxy will use the Thrift framed transport. - FRAMED = 1 - # The Thrift proxy will use the Thrift unframed transport. - UNFRAMED = 2 - # The Thrift proxy will assume the client is using the Thrift header - # transport. - HEADER = 3 - - -class ProtocolType(betterproto.Enum): - """Thrift Protocol types supported by Envoy.""" - - # For downstream connections, the Thrift proxy will attempt to determine - # which protocol to use. Note that the older, non-strict (or lax) binary - # protocol is not included in automatic protocol detection. 
For upstream - # connections, the Thrift proxy will use the same protocol as the downstream - # connection. - AUTO_PROTOCOL = 0 - # The Thrift proxy will use the Thrift binary protocol. - BINARY = 1 - # The Thrift proxy will use Thrift non-strict binary protocol. - LAX_BINARY = 2 - # The Thrift proxy will use the Thrift compact protocol. - COMPACT = 3 - # The Thrift proxy will use the Thrift "Twitter" protocol implemented by the - # finagle library. - TWITTER = 4 - - -@dataclass(eq=False, repr=False) -class RouteConfiguration(betterproto.Message): - # The name of the route configuration. Reserved for future use in - # asynchronous route discovery. - name: str = betterproto.string_field(1) - # The list of routes that will be matched, in order, against incoming - # requests. The first route that matches will be used. - routes: List["Route"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Route(betterproto.Message): - # Route matching parameters. - match: "RouteMatch" = betterproto.message_field(1) - # Route request to some upstream cluster. - route: "RouteAction" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class RouteMatch(betterproto.Message): - # If specified, the route must exactly match the request method name. As a - # special case, an empty string matches any request method name. - method_name: str = betterproto.string_field(1, group="match_specifier") - # If specified, the route must have the service name as the request method - # name prefix. As a special case, an empty string matches any service name. - # Only relevant when service multiplexing. - service_name: str = betterproto.string_field(2, group="match_specifier") - # Inverts whatever matching is done in the :ref:`method_name ` or - # :ref:`service_name ` fields. Cannot be combined with wildcard - # matching as that would result in routes never being matched. .. note:: - # This does not invert matching done as part of the :ref:`headers field ` field. To invert header matching, see :ref:`invert_match - # `. - invert: bool = betterproto.bool_field(3) - # Specifies a set of headers that the route should match on. The router will - # check the request’s headers against all the specified headers in the route - # config. A match will happen if all the headers in the route are present in - # the request with the same values (or based on presence if the value field - # is not in the config). Note that this only applies for Thrift transports - # and/or protocols that support headers. - headers: List["_____api_v2_route__.HeaderMatcher"] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class RouteAction(betterproto.Message): - """[#next-free-field: 7]""" - - # Indicates a single upstream cluster to which the request should be routed - # to. - cluster: str = betterproto.string_field(1, group="cluster_specifier") - # Multiple upstream clusters can be specified for a given route. The request - # is routed to one of the upstream clusters based on weights assigned to each - # cluster. - weighted_clusters: "WeightedCluster" = betterproto.message_field( - 2, group="cluster_specifier" - ) - # Envoy will determine the cluster to route to by reading the value of the - # Thrift header named by cluster_header from the request headers. If the - # header is not found or the referenced cluster does not exist Envoy will - # respond with an unknown method exception or an internal error exception, - # respectively. 
- cluster_header: str = betterproto.string_field(6, group="cluster_specifier") - # Optional endpoint metadata match criteria used by the subset load balancer. - # Only endpoints in the upstream cluster with metadata matching what is set - # in this field will be considered. Note that this will be merged with what's - # provided in :ref:`WeightedCluster.metadata_match `, with values there taking precedence. Keys and values should be - # provided under the "envoy.lb" metadata key. - metadata_match: "_____api_v2_core__.Metadata" = betterproto.message_field(3) - # Specifies a set of rate limit configurations that could be applied to the - # route. N.B. Thrift service or method name matching can be achieved by - # specifying a RequestHeaders action with the header name ":method-name". - rate_limits: List["_____api_v2_route__.RateLimit"] = betterproto.message_field(4) - # Strip the service prefix from the method name, if there's a prefix. For - # example, the method call Service:method would end up being just method. - strip_service_name: bool = betterproto.bool_field(5) - - -@dataclass(eq=False, repr=False) -class WeightedCluster(betterproto.Message): - """ - Allows for specification of multiple upstream clusters along with weights - that indicate the percentage of traffic to be forwarded to each cluster. - The router selects an upstream cluster based on these weights. - """ - - # Specifies one or more upstream clusters associated with the route. - clusters: List["WeightedClusterClusterWeight"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class WeightedClusterClusterWeight(betterproto.Message): - # Name of the upstream cluster. - name: str = betterproto.string_field(1) - # When a request matches the route, the choice of an upstream cluster is - # determined by its weight. The sum of weights across all entries in the - # clusters array determines the total weight. - weight: Optional[int] = betterproto.message_field(2, wraps=betterproto.TYPE_UINT32) - # Optional endpoint metadata match criteria used by the subset load balancer. - # Only endpoints in the upstream cluster with metadata matching what is set - # in this field, combined with what's provided in :ref:`RouteAction's - # metadata_match `, will be considered. Values here will take - # precedence. Keys and values should be provided under the "envoy.lb" - # metadata key. - metadata_match: "_____api_v2_core__.Metadata" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ThriftProxy(betterproto.Message): - """[#next-free-field: 6]""" - - # Supplies the type of transport that the Thrift proxy should use. Defaults - # to :ref:`AUTO_TRANSPORT`. - transport: "TransportType" = betterproto.enum_field(2) - # Supplies the type of protocol that the Thrift proxy should use. Defaults to - # :ref:`AUTO_PROTOCOL`. - protocol: "ProtocolType" = betterproto.enum_field(3) - # The human readable prefix to use when emitting statistics. - stat_prefix: str = betterproto.string_field(1) - # The route table for the connection manager is static and is specified in - # this property. - route_config: "RouteConfiguration" = betterproto.message_field(4) - # A list of individual Thrift filters that make up the filter chain for - # requests made to the Thrift proxy. Order matters as the filters are - # processed sequentially. For backwards compatibility, if no thrift_filters - # are specified, a default Thrift router filter - # (`envoy.filters.thrift.router`) is used. 
- thrift_filters: List["ThriftFilter"] = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class ThriftFilter(betterproto.Message): - """ThriftFilter configures a Thrift filter.""" - - # The name of the filter to instantiate. The name must match a supported - # filter. The built-in filters are: [#comment:TODO(zuercher): Auto generate - # the following list] * :ref:`envoy.filters.thrift.router - # ` * :ref:`envoy.filters.thrift.rate_limit - # ` - name: str = betterproto.string_field(1) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn("ThriftFilter.config is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class ThriftProtocolOptions(betterproto.Message): - """ - ThriftProtocolOptions specifies Thrift upstream protocol options. This - object is used in in :ref:`typed_extension_protocol_options`, keyed by the name - `envoy.filters.network.thrift_proxy`. - """ - - # Supplies the type of transport that the Thrift proxy should use for - # upstream connections. Selecting :ref:`AUTO_TRANSPORT`, - # which is the default, causes the proxy to use the same transport as the - # downstream connection. - transport: "TransportType" = betterproto.enum_field(1) - # Supplies the type of protocol that the Thrift proxy should use for upstream - # connections. Selecting :ref:`AUTO_PROTOCOL`, which is the - # default, causes the proxy to use the same protocol as the downstream - # connection. - protocol: "ProtocolType" = betterproto.enum_field(2) - - -from ......api.v2 import core as _____api_v2_core__ -from ......api.v2 import route as _____api_v2_route__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/filter/network/zookeeper_proxy/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/zookeeper_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/network/zookeeper_proxy/v1alpha1/__init__.py b/src/envoy_data_plane/envoy/config/filter/network/zookeeper_proxy/v1alpha1/__init__.py deleted file mode 100644 index 493f390..0000000 --- a/src/envoy_data_plane/envoy/config/filter/network/zookeeper_proxy/v1alpha1/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ZooKeeperProxy(betterproto.Message): - # The human readable prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(1) - # [#not-implemented-hide:] The optional path to use for writing ZooKeeper - # access logs. If the access log field is empty, access logs will not be - # written. - access_log: str = betterproto.string_field(2) - # Messages — requests, responses and events — that are bigger than this value - # will be ignored. If it is not set, the default value is 1Mb. 
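The thrift_proxy module deleted above is a plain set of betterproto dataclasses, so a route table and proxy config can be assembled directly in Python. A sketch using only the classes and fields shown in this hunk, with the module path taken from the deleted file's location:

from envoy_data_plane.envoy.config.filter.network.thrift_proxy.v2alpha1 import (
    ProtocolType,
    Route,
    RouteAction,
    RouteConfiguration,
    RouteMatch,
    ThriftProxy,
    TransportType,
)

proxy = ThriftProxy(
    stat_prefix="thrift_ingress",
    transport=TransportType.FRAMED,
    protocol=ProtocolType.BINARY,
    route_config=RouteConfiguration(
        name="local_route",
        routes=[
            Route(
                match=RouteMatch(method_name="ping"),
                route=RouteAction(cluster="thrift_backend"),
            ),
        ],
    ),
)

# betterproto messages serialize to the proto wire format with bytes(proxy)
# and to plain dicts/JSON with proxy.to_dict() / proxy.to_json().
print(proxy.to_dict())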
The value here - # should match the jute.maxbuffer property in your cluster configuration: - # https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#Unsafe+Options - # if that is set. If it isn't, ZooKeeper's default is also 1Mb. - max_packet_bytes: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) diff --git a/src/envoy_data_plane/envoy/config/filter/thrift/__init__.py b/src/envoy_data_plane/envoy/config/filter/thrift/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/thrift/rate_limit/__init__.py b/src/envoy_data_plane/envoy/config/filter/thrift/rate_limit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/thrift/rate_limit/v2alpha1/__init__.py b/src/envoy_data_plane/envoy/config/filter/thrift/rate_limit/v2alpha1/__init__.py deleted file mode 100644 index fa7c36a..0000000 --- a/src/envoy_data_plane/envoy/config/filter/thrift/rate_limit/v2alpha1/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RateLimit(betterproto.Message): - """[#next-free-field: 6]""" - - # The rate limit domain to use in the rate limit service request. - domain: str = betterproto.string_field(1) - # Specifies the rate limit configuration stage. Each configured rate limit - # filter performs a rate limit check using descriptors configured in the :ref - # :`envoy_api_msg_config.filter.network.thrift_proxy.v2alpha1.RouteAction` - # for the request. Only those entries with a matching stage number are used - # for a given filter. If not set, the default stage number is 0. .. note:: - # The filter supports a range of 0 - 10 inclusively for stage numbers. - stage: int = betterproto.uint32_field(2) - # The timeout in milliseconds for the rate limit service RPC. If not set, - # this defaults to 20ms. - timeout: timedelta = betterproto.message_field(3) - # The filter's behaviour in case the rate limiting service does not respond - # back. When it is set to true, Envoy will not allow traffic in case of - # communication failure between rate limiting service and the proxy. Defaults - # to false. - failure_mode_deny: bool = betterproto.bool_field(4) - # Configuration for an external rate limit service provider. If not - # specified, any calls to the rate limit service will immediately return - # success. - rate_limit_service: "____ratelimit_v2__.RateLimitServiceConfig" = ( - betterproto.message_field(5) - ) - - -from .....ratelimit import v2 as ____ratelimit_v2__ diff --git a/src/envoy_data_plane/envoy/config/filter/thrift/router/__init__.py b/src/envoy_data_plane/envoy/config/filter/thrift/router/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/thrift/router/v2alpha1/__init__.py b/src/envoy_data_plane/envoy/config/filter/thrift/router/v2alpha1/__init__.py deleted file mode 100644 index 7e110d7..0000000 --- a/src/envoy_data_plane/envoy/config/filter/thrift/router/v2alpha1/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
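The RateLimit message above maps the proto Duration timeout onto datetime.timedelta, so the documented 20ms default can be overridden with ordinary Python values. A sketch restricted to what is visible in the deleted file:

from datetime import timedelta

from envoy_data_plane.envoy.config.filter.thrift.rate_limit.v2alpha1 import RateLimit

rate_limit = RateLimit(
    domain="thrift_api",
    stage=0,                             # stages 0-10 are supported
    timeout=timedelta(milliseconds=50),  # RPC timeout to the rate limit service
    failure_mode_deny=True,              # fail closed if the service is unreachable
)
# rate_limit_service is left unset here, so per the comment above every
# rate limit check would immediately return success.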
-# sources: envoy/config/filter/thrift/router/v2alpha1/router.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Router(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/config/filter/udp/__init__.py b/src/envoy_data_plane/envoy/config/filter/udp/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/udp/udp_proxy/__init__.py b/src/envoy_data_plane/envoy/config/filter/udp/udp_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/filter/udp/udp_proxy/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/filter/udp/udp_proxy/v2alpha/__init__.py deleted file mode 100644 index c629b4b..0000000 --- a/src/envoy_data_plane/envoy/config/filter/udp/udp_proxy/v2alpha/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class UdpProxyConfig(betterproto.Message): - """Configuration for the UDP proxy filter.""" - - # The stat prefix used when emitting UDP proxy filter stats. - stat_prefix: str = betterproto.string_field(1) - # The upstream cluster to connect to. - cluster: str = betterproto.string_field(2, group="route_specifier") - # The idle timeout for sessions. Idle is defined as no datagrams between - # received or sent by the session. The default if not specified is 1 minute. - idle_timeout: timedelta = betterproto.message_field(3) diff --git a/src/envoy_data_plane/envoy/config/grpc_credential/__init__.py b/src/envoy_data_plane/envoy/config/grpc_credential/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/grpc_credential/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/grpc_credential/v2alpha/__init__.py deleted file mode 100644 index 2f6aeaa..0000000 --- a/src/envoy_data_plane/envoy/config/grpc_credential/v2alpha/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/grpc_credential/v2alpha/aws_iam.proto, envoy/config/grpc_credential/v2alpha/file_based_metadata.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FileBasedMetadataConfig(betterproto.Message): - # Location or inline data of secret to use for authentication of the Google - # gRPC connection this secret will be attached to a header of the gRPC - # connection - secret_data: "___api_v2_core__.DataSource" = betterproto.message_field(1) - # Metadata header key to use for sending the secret data if no header key is - # set, "authorization" header will be used - header_key: str = betterproto.string_field(2) - # Prefix to prepend to the secret in the metadata header if no prefix is set, - # the default is to use no prefix - header_prefix: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class AwsIamConfig(betterproto.Message): - # The `service namespace `_ of the Grpc - # endpoint. 
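For the UdpProxyConfig removed just above, idle_timeout is likewise a Duration-backed timedelta and cluster is one member of the route_specifier oneof. A short sketch using only the fields shown there:

from datetime import timedelta

from envoy_data_plane.envoy.config.filter.udp.udp_proxy.v2alpha import UdpProxyConfig

udp_proxy = UdpProxyConfig(
    stat_prefix="udp_ingress",
    cluster="udp_backend",              # route_specifier oneof member
    idle_timeout=timedelta(minutes=1),  # documented default when unset
)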
Example: appmesh - service_name: str = betterproto.string_field(1) - # The `region `_ - # hosting the Grpc endpoint. If unspecified, the extension will use the value - # in the ``AWS_REGION`` environment variable. Example: us-west-2 - region: str = betterproto.string_field(2) - - -from ....api.v2 import core as ___api_v2_core__ diff --git a/src/envoy_data_plane/envoy/config/grpc_credential/v3/__init__.py b/src/envoy_data_plane/envoy/config/grpc_credential/v3/__init__.py deleted file mode 100644 index 9087f72..0000000 --- a/src/envoy_data_plane/envoy/config/grpc_credential/v3/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/grpc_credential/v3/aws_iam.proto, envoy/config/grpc_credential/v3/file_based_metadata.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FileBasedMetadataConfig(betterproto.Message): - # Location or inline data of secret to use for authentication of the Google - # gRPC connection this secret will be attached to a header of the gRPC - # connection - secret_data: "__core_v3__.DataSource" = betterproto.message_field(1) - # Metadata header key to use for sending the secret data if no header key is - # set, "authorization" header will be used - header_key: str = betterproto.string_field(2) - # Prefix to prepend to the secret in the metadata header if no prefix is set, - # the default is to use no prefix - header_prefix: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class AwsIamConfig(betterproto.Message): - # The `service namespace `_ of the Grpc - # endpoint. Example: appmesh - service_name: str = betterproto.string_field(1) - # The `region `_ - # hosting the Grpc endpoint. If unspecified, the extension will use the value - # in the ``AWS_REGION`` environment variable. Example: us-west-2 - region: str = betterproto.string_field(2) - - -from ...core import v3 as __core_v3__ diff --git a/src/envoy_data_plane/envoy/config/health_checker/__init__.py b/src/envoy_data_plane/envoy/config/health_checker/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/health_checker/redis/__init__.py b/src/envoy_data_plane/envoy/config/health_checker/redis/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/health_checker/redis/v2/__init__.py b/src/envoy_data_plane/envoy/config/health_checker/redis/v2/__init__.py deleted file mode 100644 index 72863b1..0000000 --- a/src/envoy_data_plane/envoy/config/health_checker/redis/v2/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/health_checker/redis/v2/redis.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Redis(betterproto.Message): - # If set, optionally perform ``EXISTS `` instead of ``PING``. A return - # value from Redis of 0 (does not exist) is considered a passing healthcheck. - # A return value other than 0 is considered a failure. This allows the user - # to mark a Redis instance for maintenance by setting the specified key to - # any value and waiting for traffic to drain. 
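The v2alpha and v3 FileBasedMetadataConfig messages above both wrap a core DataSource plus a header key and prefix. A sketch of the v3 variant; DataSource comes from the core v3 module that the deleted file imports, and using its filename field (rather than inline data) is an assumption about the standard DataSource shape, not something shown in this hunk:

from envoy_data_plane.envoy.config.core.v3 import DataSource
from envoy_data_plane.envoy.config.grpc_credential.v3 import FileBasedMetadataConfig

creds = FileBasedMetadataConfig(
    secret_data=DataSource(filename="/etc/envoy/token"),  # secret read from disk
    header_key="authorization",  # default header when unset
    header_prefix="Bearer ",
)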
- key: str = betterproto.string_field(1) diff --git a/src/envoy_data_plane/envoy/config/listener/__init__.py b/src/envoy_data_plane/envoy/config/listener/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/listener/v2/__init__.py b/src/envoy_data_plane/envoy/config/listener/v2/__init__.py deleted file mode 100644 index f5b0ae5..0000000 --- a/src/envoy_data_plane/envoy/config/listener/v2/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/listener/v2/api_listener.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ApiListener(betterproto.Message): - """ - Describes a type of API listener, which is used in non-proxy clients. The - type of API exposed to the non-proxy application depends on the type of API - listener. - """ - - # The type in this field determines the type of API listener. At present, the - # following types are supported: envoy.config.filter.network.http_connection_ - # manager.v2.HttpConnectionManager (HTTP) [#next-major-version: In the v3 - # API, replace this Any field with a oneof containing the specific config - # message for each type of API listener. We could not do this in v2 because - # it would have caused circular dependencies for go protos: lds.proto depends - # on this file, and http_connection_manager.proto depends on rds.proto, which - # is in the same directory as lds.proto, so lds.proto cannot depend on this - # file.] - api_listener: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(1) - - -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/listener/v3/__init__.py b/src/envoy_data_plane/envoy/config/listener/v3/__init__.py deleted file mode 100644 index cae8c7c..0000000 --- a/src/envoy_data_plane/envoy/config/listener/v3/__init__.py +++ /dev/null @@ -1,686 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/listener/v3/api_listener.proto, envoy/config/listener/v3/listener.proto, envoy/config/listener/v3/listener_components.proto, envoy/config/listener/v3/quic_config.proto, envoy/config/listener/v3/udp_listener_config.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class FilterChainMatchConnectionSourceType(betterproto.Enum): - ANY = 0 - SAME_IP_OR_LOOPBACK = 1 - EXTERNAL = 2 - - -class ListenerDrainType(betterproto.Enum): - DEFAULT = 0 - MODIFY_ONLY = 1 - - -@dataclass(eq=False, repr=False) -class QuicProtocolOptions(betterproto.Message): - """ - Configuration specific to the UDP QUIC listener. [#next-free-field: 8] - """ - - quic_protocol_options: "__core_v3__.QuicProtocolOptions" = ( - betterproto.message_field(1) - ) - # Maximum number of milliseconds that connection will be alive when there is - # no network activity. If it is less than 1ms, Envoy will use 1ms. 300000ms - # if not specified. - idle_timeout: timedelta = betterproto.message_field(2) - # Connection timeout in milliseconds before the crypto handshake is finished. - # If it is less than 5000ms, Envoy will use 5000ms. 20000ms if not specified. 
- crypto_handshake_timeout: timedelta = betterproto.message_field(3) - # Runtime flag that controls whether the listener is enabled or not. If not - # specified, defaults to enabled. - enabled: "__core_v3__.RuntimeFeatureFlag" = betterproto.message_field(4) - # A multiplier to number of connections which is used to determine how many - # packets to read per event loop. A reasonable number should allow the - # listener to process enough payload but not starve TCP and other UDP sockets - # and also prevent long event loop duration. The default value is 32. This - # means if there are N QUIC connections, the total number of packets to read - # in each read event will be 32 * N. The actual number of packets to read in - # total by the UDP listener is also bound by 6000, regardless of this field - # or how many connections there are. - packets_to_read_to_connection_count_ratio: Optional[ - int - ] = betterproto.message_field(5, wraps=betterproto.TYPE_UINT32) - # Configure which implementation of `quic::QuicCryptoClientStreamBase` to be - # used for this listener. If not specified the :ref:`QUICHE default one - # configured by ` will be used. [#extension-category: - # envoy.quic.server.crypto_stream] - crypto_stream_config: "__core_v3__.TypedExtensionConfig" = ( - betterproto.message_field(6) - ) - # Configure which implementation of `quic::ProofSource` to be used for this - # listener. If not specified the :ref:`default one configured by - # ` will - # be used. [#extension-category: envoy.quic.proof_source] - proof_source_config: "__core_v3__.TypedExtensionConfig" = betterproto.message_field( - 7 - ) - - -@dataclass(eq=False, repr=False) -class ApiListener(betterproto.Message): - """ - Describes a type of API listener, which is used in non-proxy clients. The - type of API exposed to the non-proxy application depends on the type of API - listener. - """ - - # The type in this field determines the type of API listener. At present, the - # following types are supported: envoy.extensions.filters.network.http_connec - # tion_manager.v3.HttpConnectionManager (HTTP) envoy.extensions.filters.netwo - # rk.http_connection_manager.v3.EnvoyMobileHttpConnectionManager (HTTP) - # [#next-major-version: In the v3 API, replace this Any field with a oneof - # containing the specific config message for each type of API listener. We - # could not do this in v2 because it would have caused circular dependencies - # for go protos: lds.proto depends on this file, and - # http_connection_manager.proto depends on rds.proto, which is in the same - # directory as lds.proto, so lds.proto cannot depend on this file.] - api_listener: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class Filter(betterproto.Message): - """[#next-free-field: 6]""" - - # The name of the filter to instantiate. The name must match a - # :ref:`supported filter `. - name: str = betterproto.string_field(1) - # Filter specific configuration which depends on the filter being - # instantiated. See the supported filters for further documentation. - # [#extension-category: envoy.filters.network] - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 4, group="config_type" - ) - # Configuration source specifier for an extension configuration discovery - # service. In case of a failure and without the default configuration, the - # listener closes the connections. 
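The QUIC timeouts above are Durations exposed as timedelta, and packets_to_read_to_connection_count_ratio is a wrapped uint32 that accepts a plain int. A brief sketch with the documented defaults written out explicitly:

from datetime import timedelta

from envoy_data_plane.envoy.config.listener.v3 import QuicProtocolOptions

quic = QuicProtocolOptions(
    idle_timeout=timedelta(milliseconds=300_000),    # documented default
    crypto_handshake_timeout=timedelta(seconds=20),  # documented default
    packets_to_read_to_connection_count_ratio=32,    # wrapped uint32, default 32
)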
[#not-implemented-hide:] - config_discovery: "__core_v3__.ExtensionConfigSource" = betterproto.message_field( - 5, group="config_type" - ) - - -@dataclass(eq=False, repr=False) -class FilterChainMatch(betterproto.Message): - """ - Specifies the match criteria for selecting a specific filter chain for a - listener. In order for a filter chain to be selected, *ALL* of its criteria - must be fulfilled by the incoming connection, properties of which are set - by the networking stack and/or listener filters. The following order - applies: 1. Destination port. 2. Destination IP address. 3. Server name - (e.g. SNI for TLS protocol), 4. Transport protocol. 5. Application - protocols (e.g. ALPN for TLS protocol). 6. Directly connected source IP - address (this will only be different from the source IP address when - using a listener filter that overrides the source address, such as the - :ref:`Proxy Protocol listener filter - `). 7. Source type (e.g. any, local - or external network). 8. Source IP address. 9. Source port. For criteria - that allow ranges or wildcards, the most specific value in any of the - configured filter chains that matches the incoming connection is going to - be used (e.g. for SNI ``www.example.com`` the most specific match would be - ``www.example.com``, then ``*.example.com``, then ``*.com``, then any - filter chain without ``server_names`` requirements). A different way to - reason about the filter chain matches: Suppose there exists N filter - chains. Prune the filter chain set using the above 8 steps. In each step, - filter chains which most specifically matches the attributes continue to - the next step. The listener guarantees at most 1 filter chain is left after - all of the steps. Example: For destination port, filter chains specifying - the destination port of incoming traffic are the most specific match. If - none of the filter chains specifies the exact destination port, the filter - chains which do not specify ports are the most specific match. Filter - chains specifying the wrong port can never be the most specific match. - [#comment: Implemented rules are kept in the preference order, with - deprecated fields listed at the end, because that's how we want to list - them in the docs. [#comment:TODO(PiotrSikora): Add support for configurable - precedence of the rules] [#next-free-field: 14] - """ - - # Optional destination port to consider when use_original_dst is set on the - # listener in determining a filter chain match. - destination_port: Optional[int] = betterproto.message_field( - 8, wraps=betterproto.TYPE_UINT32 - ) - # If non-empty, an IP address and prefix length to match addresses when the - # listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - prefix_ranges: List["__core_v3__.CidrRange"] = betterproto.message_field(3) - # If non-empty, an IP address and suffix length to match addresses when the - # listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - # [#not-implemented-hide:] - address_suffix: str = betterproto.string_field(4) - # [#not-implemented-hide:] - suffix_len: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) - # The criteria is satisfied if the directly connected source IP address of - # the downstream connection is contained in at least one of the specified - # subnets. If the parameter is not specified or the list is empty, the - # directly connected source IP address is ignored. 
- direct_source_prefix_ranges: List[ - "__core_v3__.CidrRange" - ] = betterproto.message_field(13) - # Specifies the connection source IP match type. Can be any, local or - # external network. - source_type: "FilterChainMatchConnectionSourceType" = betterproto.enum_field(12) - # The criteria is satisfied if the source IP address of the downstream - # connection is contained in at least one of the specified subnets. If the - # parameter is not specified or the list is empty, the source IP address is - # ignored. - source_prefix_ranges: List["__core_v3__.CidrRange"] = betterproto.message_field(6) - # The criteria is satisfied if the source port of the downstream connection - # is contained in at least one of the specified ports. If the parameter is - # not specified, the source port is ignored. - source_ports: List[int] = betterproto.uint32_field(7) - # If non-empty, a list of server names (e.g. SNI for TLS protocol) to - # consider when determining a filter chain match. Those values will be - # compared against the server names of a new connection, when detected by one - # of the listener filters. The server name will be matched against all - # wildcard domains, i.e. ``www.example.com`` will be first matched against - # ``www.example.com``, then ``*.example.com``, then ``*.com``. Note that - # partial wildcards are not supported, and values like ``*w.example.com`` are - # invalid. .. attention:: See the :ref:`FAQ entry ` - # on how to configure SNI for more information. - server_names: List[str] = betterproto.string_field(11) - # If non-empty, a transport protocol to consider when determining a filter - # chain match. This value will be compared against the transport protocol of - # a new connection, when it's detected by one of the listener filters. - # Suggested values include: * ``raw_buffer`` - default, used when no - # transport protocol is detected, * ``tls`` - set by - # :ref:`envoy.filters.listener.tls_inspector - # ` when TLS protocol is detected. - transport_protocol: str = betterproto.string_field(9) - # If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) - # to consider when determining a filter chain match. Those values will be - # compared against the application protocols of a new connection, when - # detected by one of the listener filters. Suggested values include: * - # ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector - # `, * ``h2`` - set by - # :ref:`envoy.filters.listener.tls_inspector - # ` .. attention:: Currently, only - # :ref:`TLS Inspector ` provides - # application protocol detection based on the requested `ALPN - # `_ - # values. However, the use of ALPN is pretty much limited to the HTTP/2 - # traffic on the Internet, and matching on values other than ``h2`` is - # going to lead to a lot of false negatives, unless all connecting clients - # are known to use ALPN. - application_protocols: List[str] = betterproto.string_field(10) - - -@dataclass(eq=False, repr=False) -class FilterChain(betterproto.Message): - """ - A filter chain wraps a set of match criteria, an option TLS context, a set - of filters, and various other parameters. [#next-free-field: 10] - """ - - # The criteria to use when matching a connection to this filter chain. - filter_chain_match: "FilterChainMatch" = betterproto.message_field(1) - # A list of individual network filters that make up the filter chain for - # connections established with the listener. Order matters as the filters are - # processed sequentially as connection events happen. 
Note: If the filter - # list is empty, the connection will close by default. - filters: List["Filter"] = betterproto.message_field(3) - # Whether the listener should expect a PROXY protocol V1 header on new - # connections. If this option is enabled, the listener will assume that that - # remote address of the connection is the one specified in the header. Some - # load balancers including the AWS ELB support this option. If the option is - # absent or set to false, Envoy will use the physical peer address of the - # connection as the remote address. This field is deprecated. Add a - # :ref:`PROXY protocol listener filter - # ` explicitly instead. - use_proxy_proto: Optional[bool] = betterproto.message_field( - 4, wraps=betterproto.TYPE_BOOL - ) - # [#not-implemented-hide:] filter chain metadata. - metadata: "__core_v3__.Metadata" = betterproto.message_field(5) - # Optional custom transport socket implementation to use for downstream - # connections. To setup TLS, set a transport socket with name - # `envoy.transport_sockets.tls` and :ref:`DownstreamTlsContext ` in the - # `typed_config`. If no transport socket configuration is specified, new - # connections will be set up with plaintext. [#extension-category: - # envoy.transport_sockets.downstream] - transport_socket: "__core_v3__.TransportSocket" = betterproto.message_field(6) - # If present and nonzero, the amount of time to allow incoming connections to - # complete any transport socket negotiations. If this expires before the - # transport reports connection establishment, the connection is summarily - # closed. - transport_socket_connect_timeout: timedelta = betterproto.message_field(9) - # [#not-implemented-hide:] The unique name (or empty) by which this filter - # chain is known. If no name is provided, Envoy will allocate an internal - # UUID for the filter chain. If the filter chain is to be dynamically updated - # or removed via FCDS a unique name must be provided. - name: str = betterproto.string_field(7) - # [#not-implemented-hide:] The configuration to specify whether the filter - # chain will be built on-demand. If this field is not empty, the filter chain - # will be built on-demand. Otherwise, the filter chain will be built normally - # and block listener warming. - on_demand_configuration: "FilterChainOnDemandConfiguration" = ( - betterproto.message_field(8) - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.use_proxy_proto: - warnings.warn( - "FilterChain.use_proxy_proto is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class FilterChainOnDemandConfiguration(betterproto.Message): - """ - The configuration for on-demand filter chain. If this field is not empty in - FilterChain message, a filter chain will be built on-demand. On-demand - filter chains help speedup the warming up of listeners since the building - and initialization of an on-demand filter chain will be postponed to the - arrival of new connection requests that require this filter chain. Filter - chains that are not often used can be set as on-demand. - """ - - # The timeout to wait for filter chain placeholders to complete rebuilding. - # 1. If this field is set to 0, timeout is disabled. 2. If not specified, a - # default timeout of 15s is used. Rebuilding will wait until dependencies are - # ready, have failed, or this timeout is reached. Upon failure or timeout, - # all connections related to this filter chain will be closed. Rebuilding - # will start again on the next new connection. 
- rebuild_timeout: timedelta = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ListenerFilterChainMatchPredicate(betterproto.Message): - """ - Listener filter chain match configuration. This is a recursive structure - which allows complex nested match configurations to be built using various - logical operators. Examples: * Matches if the destination port is 3306. .. - code-block:: yaml destination_port_range: start: 3306 end: 3307 * - Matches if the destination port is 3306 or 15000. .. code-block:: yaml - or_match: rules: - destination_port_range: start: 3306 - end: 3307 - destination_port_range: start: 15000 - end: 15001 [#next-free-field: 6] - """ - - # A set that describes a logical OR. If any member of the set matches, the - # match configuration matches. - or_match: "ListenerFilterChainMatchPredicateMatchSet" = betterproto.message_field( - 1, group="rule" - ) - # A set that describes a logical AND. If all members of the set match, the - # match configuration matches. - and_match: "ListenerFilterChainMatchPredicateMatchSet" = betterproto.message_field( - 2, group="rule" - ) - # A negation match. The match configuration will match if the negated match - # condition matches. - not_match: "ListenerFilterChainMatchPredicate" = betterproto.message_field( - 3, group="rule" - ) - # The match configuration will always match. - any_match: bool = betterproto.bool_field(4, group="rule") - # Match destination port. Particularly, the match evaluation must use the - # recovered local port if the owning listener filter is after :ref:`an - # original_dst listener filter `. - destination_port_range: "___type_v3__.Int32Range" = betterproto.message_field( - 5, group="rule" - ) - - -@dataclass(eq=False, repr=False) -class ListenerFilterChainMatchPredicateMatchSet(betterproto.Message): - """A set of match configurations used for logical operations.""" - - # The list of rules that make up the set. - rules: List["ListenerFilterChainMatchPredicate"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ListenerFilter(betterproto.Message): - # The name of the filter to instantiate. The name must match a - # :ref:`supported filter `. - name: str = betterproto.string_field(1) - # Filter specific configuration which depends on the filter being - # instantiated. See the supported filters for further documentation. - # [#extension-category: envoy.filters.listener,envoy.filters.udp_listener] - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - # Optional match predicate used to disable the filter. The filter is enabled - # when this field is empty. See :ref:`ListenerFilterChainMatchPredicate - # ` - # for further examples. - filter_disabled: "ListenerFilterChainMatchPredicate" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class UdpListenerConfig(betterproto.Message): - """[#next-free-field: 8]""" - - # UDP socket configuration for the listener. The default for :ref:`prefer_gro - # ` is false - # for listener sockets. If receiving a large amount of datagrams from a small - # number of sources, it may be worthwhile to enable this option after - # performance testing. - downstream_socket_config: "__core_v3__.UdpSocketConfig" = betterproto.message_field( - 5 - ) - # Configuration for QUIC protocol. If empty, QUIC will not be enabled on this - # listener. Set to the default object to enable QUIC without modifying any - # additional options. .. 
warning:: QUIC support is currently alpha and - # should be used with caution. Please see :ref:`here ` - # for details. - quic_options: "QuicProtocolOptions" = betterproto.message_field(7) - - -@dataclass(eq=False, repr=False) -class ActiveRawUdpListenerConfig(betterproto.Message): - pass - - -@dataclass(eq=False, repr=False) -class ListenerCollection(betterproto.Message): - """ - Listener list collections. Entries are *Listener* resources or references. - [#not-implemented-hide:] - """ - - entries: List["____xds_core_v3__.CollectionEntry"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class Listener(betterproto.Message): - """[#next-free-field: 32]""" - - # The unique name by which this listener is known. If no name is provided, - # Envoy will allocate an internal UUID for the listener. If the listener is - # to be dynamically updated or removed via :ref:`LDS ` - # a unique name must be provided. - name: str = betterproto.string_field(1) - # The address that the listener should listen on. In general, the address - # must be unique, though that is governed by the bind rules of the OS. E.g., - # multiple listeners can listen on port 0 on Linux as the actual port will be - # allocated by the OS. - address: "__core_v3__.Address" = betterproto.message_field(2) - # Optional prefix to use on listener stats. If empty, the stats will be - # rooted at `listener.
.`. If non-empty, stats will be - # rooted at `listener..`. - stat_prefix: str = betterproto.string_field(28) - # A list of filter chains to consider for this listener. The - # :ref:`FilterChain ` with - # the most specific :ref:`FilterChainMatch - # ` criteria is used on - # a connection. Example using SNI for filter chain selection can be found in - # the :ref:`FAQ entry `. - filter_chains: List["FilterChain"] = betterproto.message_field(3) - # If a connection is redirected using *iptables*, the port on which the proxy - # receives it might be different from the original destination address. When - # this flag is set to true, the listener hands off redirected connections to - # the listener associated with the original destination address. If there is - # no listener associated with the original destination address, the - # connection is handled by the listener that receives it. Defaults to false. - use_original_dst: Optional[bool] = betterproto.message_field( - 4, wraps=betterproto.TYPE_BOOL - ) - # The default filter chain if none of the filter chain matches. If no default - # filter chain is supplied, the connection will be closed. The filter chain - # match is ignored in this field. - default_filter_chain: "FilterChain" = betterproto.message_field(25) - # Soft limit on size of the listener’s new connection read and write buffers. - # If unspecified, an implementation defined default is applied (1MiB). - per_connection_buffer_limit_bytes: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) - # Listener metadata. - metadata: "__core_v3__.Metadata" = betterproto.message_field(6) - # [#not-implemented-hide:] - deprecated_v1: "ListenerDeprecatedV1" = betterproto.message_field(7) - # The type of draining to perform at a listener-wide level. - drain_type: "ListenerDrainType" = betterproto.enum_field(8) - # Listener filters have the opportunity to manipulate and augment the - # connection metadata that is used in connection filter chain matching, for - # example. These filters are run before any in :ref:`filter_chains - # `. Order - # matters as the filters are processed sequentially right after a socket has - # been accepted by the listener, and before a connection is created. UDP - # Listener filters can be specified when the protocol in the listener socket - # address in :ref:`protocol - # ` is :ref:`UDP - # `. - listener_filters: List["ListenerFilter"] = betterproto.message_field(9) - # The timeout to wait for all listener filters to complete operation. If the - # timeout is reached, the accepted socket is closed without a connection - # being created unless `continue_on_listener_filters_timeout` is set to true. - # Specify 0 to disable the timeout. If not specified, a default timeout of - # 15s is used. - listener_filters_timeout: timedelta = betterproto.message_field(15) - # Whether a connection should be created when listener filters timeout. - # Default is false. .. attention:: Some listener filters, such as - # :ref:`Proxy Protocol filter `, - # should not be used with this option. It will cause unexpected behavior - # when a connection is created. - continue_on_listener_filters_timeout: bool = betterproto.bool_field(17) - # Whether the listener should be set as a transparent socket. When this flag - # is set to true, connections can be redirected to the listener using an - # *iptables* *TPROXY* target, in which case the original source and - # destination addresses and ports are preserved on accepted connections. 
This - # flag should be used in combination with :ref:`an original_dst - # ` :ref:`listener filter - # ` to mark - # the connections' local addresses as "restored." This can be used to hand - # off each redirected connection to another listener associated with the - # connection's destination address. Direct connections to the socket without - # using *TPROXY* cannot be distinguished from connections redirected using - # *TPROXY* and are therefore treated as if they were redirected. When this - # flag is set to false, the listener's socket is explicitly reset as non- - # transparent. Setting this flag requires Envoy to run with the - # *CAP_NET_ADMIN* capability. When this flag is not set (default), the socket - # is not modified, i.e. the transparent option is neither set nor reset. - transparent: Optional[bool] = betterproto.message_field( - 10, wraps=betterproto.TYPE_BOOL - ) - # Whether the listener should set the *IP_FREEBIND* socket option. When this - # flag is set to true, listeners can be bound to an IP address that is not - # configured on the system running Envoy. When this flag is set to false, the - # option *IP_FREEBIND* is disabled on the socket. When this flag is not set - # (default), the socket is not modified, i.e. the option is neither enabled - # nor disabled. - freebind: Optional[bool] = betterproto.message_field( - 11, wraps=betterproto.TYPE_BOOL - ) - # Additional socket options that may not be present in Envoy source code or - # precompiled binaries. - socket_options: List["__core_v3__.SocketOption"] = betterproto.message_field(13) - # Whether the listener should accept TCP Fast Open (TFO) connections. When - # this flag is set to a value greater than 0, the option TCP_FASTOPEN is - # enabled on the socket, with a queue length of the specified size (see - # `details in RFC7413 `_). - # When this flag is set to 0, the option TCP_FASTOPEN is disabled on the - # socket. When this flag is not set (default), the socket is not modified, - # i.e. the option is neither enabled nor disabled. On Linux, the - # net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable - # TCP_FASTOPEN. See `ip-sysctl.txt - # `_. On - # macOS, only values of 0, 1, and unset are valid; other values may result in - # an error. To set the queue length on macOS, set the - # net.inet.tcp.fastopen_backlog kernel parameter. - tcp_fast_open_queue_length: Optional[int] = betterproto.message_field( - 12, wraps=betterproto.TYPE_UINT32 - ) - # Specifies the intended direction of the traffic relative to the local - # Envoy. This property is required on Windows for listeners using the - # original destination filter, see :ref:`Original Destination - # `. - traffic_direction: "__core_v3__.TrafficDirection" = betterproto.enum_field(16) - # If the protocol in the listener socket address in :ref:`protocol - # ` is :ref:`UDP - # `, this - # field specifies UDP listener specific configuration. - udp_listener_config: "UdpListenerConfig" = betterproto.message_field(18) - # Used to represent an API listener, which is used in non-proxy clients. The - # type of API exposed to the non-proxy application depends on the type of API - # listener. When this field is set, no other field except for - # :ref:`name` should be - # set. .. note:: Currently only one ApiListener can be installed; and it can - # only be done via bootstrap config, not LDS. 
[#next-major-version: In the - # v3 API, instead of this messy approach where the socket listener fields are - # directly in the top-level Listener message and the API listener types are - # in the ApiListener message, the socket listener messages should be in their - # own message, and the top-level Listener should essentially be a oneof that - # selects between the socket listener and the various types of API listener. - # That way, a given Listener message can structurally only contain the fields - # of the relevant type.] - api_listener: "ApiListener" = betterproto.message_field(19) - # The listener's connection balancer configuration, currently only applicable - # to TCP listeners. If no configuration is specified, Envoy will not attempt - # to balance active connections between worker threads. In the scenario that - # the listener X redirects all the connections to the listeners Y1 and Y2 by - # setting :ref:`use_original_dst - # ` in X and - # :ref:`bind_to_port - # ` to false in - # Y1 and Y2, it is recommended to disable the balance config in listener X to - # avoid the cost of balancing, and enable the balance config in Y1 and Y2 to - # balance the connections among the workers. - connection_balance_config: "ListenerConnectionBalanceConfig" = ( - betterproto.message_field(20) - ) - # Deprecated. Use `enable_reuse_port` instead. - reuse_port: bool = betterproto.bool_field(21) - # When this flag is set to true, listeners set the *SO_REUSEPORT* socket - # option and create one socket for each worker thread. This makes inbound - # connections distribute among worker threads roughly evenly in cases where - # there are a high number of connections. When this flag is set to false, all - # worker threads share one socket. This field defaults to true. .. - # attention:: Although this field defaults to true, it has different - # behavior on different platforms. See the following text for more - # information. * On Linux, reuse_port is respected for both TCP and UDP - # listeners. It also works correctly with hot restart. * On macOS, - # reuse_port for TCP does not do what it does on Linux. Instead of load - # balancing, the last socket wins and receives all connections/packets. For - # TCP, reuse_port is force disabled and the user is warned. For UDP, it is - # enabled, but only one worker will receive packets. For QUIC/H3, SW - # routing will send packets to other workers. For "raw" UDP, only a single - # worker will currently receive packets. * On Windows, reuse_port for TCP has - # undefined behavior. It is force disabled and the user is warned similar - # to macOS. It is left enabled for UDP with undefined behavior currently. - enable_reuse_port: Optional[bool] = betterproto.message_field( - 29, wraps=betterproto.TYPE_BOOL - ) - # Configuration for :ref:`access logs ` emitted by - # this listener. - access_log: List["__accesslog_v3__.AccessLog"] = betterproto.message_field(22) - # The maximum length a tcp listener's pending connections queue can grow to. - # If no value is provided net.core.somaxconn will be used on Linux and 128 - # otherwise. - tcp_backlog_size: Optional[int] = betterproto.message_field( - 24, wraps=betterproto.TYPE_UINT32 - ) - # Whether the listener should bind to the port. A listener that doesn't bind - # can only receive connections redirected from other listeners that set - # :ref:`use_original_dst - # ` to true. - # Default is true. 
- bind_to_port: Optional[bool] = betterproto.message_field( - 26, wraps=betterproto.TYPE_BOOL - ) - # Used to represent an internal listener which does not listen on OSI L4 - # address but can be used by the :ref:`envoy cluster - # ` to create a user space - # connection to. The internal listener acts as a tcp listener. It supports - # listener filters and network filter chains. The internal listener require - # :ref:`address ` has - # field `envoy_internal_address`. There are some limitations are derived from - # the implementation. The known limitations include * - # :ref:`ConnectionBalanceConfig - # ` is - # not allowed because both cluster connection and listener connection must - # be owned by the same dispatcher. * :ref:`tcp_backlog_size - # ` * - # :ref:`freebind ` * - # :ref:`transparent - # ` [#not- - # implemented-hide:] - internal_listener: "ListenerInternalListenerConfig" = betterproto.message_field( - 27, group="listener_specifier" - ) - # Enable MPTCP (multi-path TCP) on this listener. Clients will be allowed to - # establish MPTCP connections. Non-MPTCP clients will fall back to regular - # TCP. - enable_mptcp: bool = betterproto.bool_field(30) - # Whether the listener should limit connections based upon the value of - # :ref:`global_downstream_max_connections - # `. - ignore_global_conn_limit: bool = betterproto.bool_field(31) - - def __post_init__(self) -> None: - super().__post_init__() - if self.deprecated_v1: - warnings.warn("Listener.deprecated_v1 is deprecated", DeprecationWarning) - if self.reuse_port: - warnings.warn("Listener.reuse_port is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class ListenerDeprecatedV1(betterproto.Message): - """[#not-implemented-hide:]""" - - # Whether the listener should bind to the port. A listener that doesn't bind - # can only receive connections redirected from other listeners that set - # use_original_dst parameter to true. Default is true. This is deprecated. - # Use :ref:`Listener.bind_to_port - # ` - bind_to_port: Optional[bool] = betterproto.message_field( - 1, wraps=betterproto.TYPE_BOOL - ) - - -@dataclass(eq=False, repr=False) -class ListenerConnectionBalanceConfig(betterproto.Message): - """Configuration for listener connection balancing.""" - - # If specified, the listener will use the exact connection balancer. - exact_balance: "ListenerConnectionBalanceConfigExactBalance" = ( - betterproto.message_field(1, group="balance_type") - ) - - -@dataclass(eq=False, repr=False) -class ListenerConnectionBalanceConfigExactBalance(betterproto.Message): - """ - A connection balancer implementation that does exact balancing. This means - that a lock is held during balancing so that connection counts are nearly - exactly balanced between worker threads. This is "nearly" exact in the - sense that a connection might close in parallel thus making the counts - incorrect, but this should be rectified on the next accept. This balancer - sacrifices accept throughput for accuracy and should be used when there are - a small number of connections that rarely cycle (e.g., service mesh gRPC - egress). - """ - - pass - - -@dataclass(eq=False, repr=False) -class ListenerInternalListenerConfig(betterproto.Message): - """ - Configuration for envoy internal listener. All the future internal listener - features should be added here. 
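A minimal end-to-end sketch of the v3 Listener message above: one filter chain selected by SNI and transport protocol. Address and SocketAddress come from the core v3 module that this file imports; their address/port_value shape is assumed from the standard envoy.config.core.v3 API rather than from this hunk, and a real network Filter would also carry a typed_config Any.

from envoy_data_plane.envoy.config.core.v3 import Address, SocketAddress
from envoy_data_plane.envoy.config.listener.v3 import (
    Filter,
    FilterChain,
    FilterChainMatch,
    Listener,
)

listener = Listener(
    name="ingress_tls",
    address=Address(
        socket_address=SocketAddress(address="0.0.0.0", port_value=8443),
    ),
    filter_chains=[
        FilterChain(
            filter_chain_match=FilterChainMatch(
                server_names=["www.example.com"],  # matched per the SNI rules above
                transport_protocol="tls",          # set by the tls_inspector listener filter
            ),
            filters=[
                Filter(name="envoy.filters.network.tcp_proxy"),  # typed_config omitted here
            ],
        ),
    ],
)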
[#not-implemented-hide:] - """ - - pass - - -from .....xds.core import v3 as ____xds_core_v3__ -from ....type import v3 as ___type_v3__ -from ...accesslog import v3 as __accesslog_v3__ -from ...core import v3 as __core_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/metrics/__init__.py b/src/envoy_data_plane/envoy/config/metrics/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/metrics/v2/__init__.py b/src/envoy_data_plane/envoy/config/metrics/v2/__init__.py deleted file mode 100644 index 8689659..0000000 --- a/src/envoy_data_plane/envoy/config/metrics/v2/__init__.py +++ /dev/null @@ -1,237 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/metrics/v2/metrics_service.proto, envoy/config/metrics/v2/stats.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class StatsSink(betterproto.Message): - """Configuration for pluggable stats sinks.""" - - # The name of the stats sink to instantiate. The name must match a supported - # stats sink. The built-in stats sinks are: * :ref:`envoy.stat_sinks.statsd - # ` * - # :ref:`envoy.stat_sinks.dog_statsd - # ` * - # :ref:`envoy.stat_sinks.metrics_service - # ` * - # :ref:`envoy.stat_sinks.hystrix - # ` Sinks optionally support - # tagged/multiple dimensional metrics. - name: str = betterproto.string_field(1) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn("StatsSink.config is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class StatsConfig(betterproto.Message): - """Statistics configuration such as tagging.""" - - # Each stat name is iteratively processed through these tag specifiers. When - # a tag is matched, the first capture group is removed from the name so later - # :ref:`TagSpecifiers ` cannot - # match that same portion of the match. - stats_tags: List["TagSpecifier"] = betterproto.message_field(1) - # Use all default tag regexes specified in Envoy. These can be combined with - # custom tags specified in :ref:`stats_tags - # `. They will be - # processed before the custom tags. .. note:: If any default tags are - # specified twice, the config will be considered invalid. See - # :repo:`well_known_names.h ` for a - # list of the default tags in Envoy. If not provided, the value is assumed to - # be true. - use_all_default_tags: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) - # Inclusion/exclusion matcher for stat name creation. If not provided, all - # stats are instantiated as normal. Preventing the instantiation of certain - # families of stats can improve memory performance for Envoys running - # especially large configs. .. warning:: Excluding stats may affect Envoy's - # behavior in undocumented ways. See `issue #8771 - # `_ for more information. - # If any unexpected behavior changes are observed, please open a new issue - # immediately. 
- stats_matcher: "StatsMatcher" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class StatsMatcher(betterproto.Message): - """Configuration for disabling stat instantiation.""" - - # If `reject_all` is true, then all stats are disabled. If `reject_all` is - # false, then all stats are enabled. - reject_all: bool = betterproto.bool_field(1, group="stats_matcher") - # Exclusive match. All stats are enabled except for those matching one of the - # supplied StringMatcher protos. - exclusion_list: "___type_matcher__.ListStringMatcher" = betterproto.message_field( - 2, group="stats_matcher" - ) - # Inclusive match. No stats are enabled except for those matching one of the - # supplied StringMatcher protos. - inclusion_list: "___type_matcher__.ListStringMatcher" = betterproto.message_field( - 3, group="stats_matcher" - ) - - -@dataclass(eq=False, repr=False) -class TagSpecifier(betterproto.Message): - """ - Designates a tag name and value pair. The value may be either a fixed value - or a regex providing the value via capture groups. The specified tag will - be unconditionally set if a fixed value, otherwise it will only be set if - one or more capture groups in the regex match. - """ - - # Attaches an identifier to the tag values to identify the tag being in the - # sink. Envoy has a set of default names and regexes to extract dynamic - # portions of existing stats, which can be found in :repo:`well_known_names.h - # ` in the Envoy repository. If a - # :ref:`tag_name ` - # is provided in the config and neither :ref:`regex - # ` or - # :ref:`fixed_value - # ` were - # specified, Envoy will attempt to find that name in its set of defaults and - # use the accompanying regex. .. note:: It is invalid to specify the same - # tag name twice in a config. - tag_name: str = betterproto.string_field(1) - # Designates a tag to strip from the tag extracted name and provide as a - # named tag value for all statistics. This will only occur if any part of the - # name matches the regex provided with one or more capture groups. The first - # capture group identifies the portion of the name to remove. The second - # capture group (which will normally be nested inside the first) will - # designate the value of the tag for the statistic. If no second capture - # group is provided, the first will also be used to set the value of the tag. - # All other capture groups will be ignored. Example 1. a stat name - # ``cluster.foo_cluster.upstream_rq_timeout`` and one tag specifier: .. code- - # block:: json { "tag_name": "envoy.cluster_name", "regex": - # "^cluster\\.((.+?)\\.)" } Note that the regex will remove - # ``foo_cluster.`` making the tag extracted name - # ``cluster.upstream_rq_timeout`` and the tag value for - # ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no - # ``.`` character because of the second capture group). Example 2. a stat - # name ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and - # two tag specifiers: .. code-block:: json [ { "tag_name": - # "envoy.http_user_agent", "regex": - # "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" }, { - # "tag_name": "envoy.http_conn_manager_prefix", "regex": - # "^http\\.((.*?)\\.)" } ] The two regexes of the specifiers will be - # processed in the definition order. The first regex will remove ``ios.``, - # leaving the tag extracted name - # ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag - # ``envoy.http_user_agent`` will be added with tag value ``ios``. 
The second - # regex will remove ``connection_manager_1.`` from the tag extracted name - # produced by the first regex - # ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving - # ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag - # ``envoy.http_conn_manager_prefix`` will be added with the tag value - # ``connection_manager_1``. - regex: str = betterproto.string_field(2, group="tag_value") - # Specifies a fixed tag value for the ``tag_name``. - fixed_value: str = betterproto.string_field(3, group="tag_value") - - -@dataclass(eq=False, repr=False) -class StatsdSink(betterproto.Message): - """ - Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* - sink. This sink does not support tagged metrics. [#extension: - envoy.stat_sinks.statsd] - """ - - # The UDP address of a running `statsd `_ - # compliant listener. If specified, statistics will be flushed to this - # address. - address: "___api_v2_core__.Address" = betterproto.message_field( - 1, group="statsd_specifier" - ) - # The name of a cluster that is running a TCP `statsd - # `_ compliant listener. If specified, Envoy - # will connect to this cluster to flush statistics. - tcp_cluster_name: str = betterproto.string_field(2, group="statsd_specifier") - # Optional custom prefix for StatsdSink. If specified, this will override the - # default prefix. For example: .. code-block:: json { "prefix" : - # "envoy-prod" } will change emitted stats to .. code-block:: cpp envoy- - # prod.test_counter:1|c envoy-prod.test_timer:5|ms Note that the default - # prefix, "envoy", will be used if a prefix is not specified. Stats with - # default prefix: .. code-block:: cpp envoy.test_counter:1|c - # envoy.test_timer:5|ms - prefix: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class DogStatsdSink(betterproto.Message): - """ - Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* - sink. The sink emits stats with `DogStatsD - `_ compatible tags. Tags are - configurable via :ref:`StatsConfig - `. [#extension: - envoy.stat_sinks.dog_statsd] - """ - - # The UDP address of a running DogStatsD compliant listener. If specified, - # statistics will be flushed to this address. - address: "___api_v2_core__.Address" = betterproto.message_field( - 1, group="dog_statsd_specifier" - ) - # Optional custom metric name prefix. See :ref:`StatsdSink's prefix field - # ` for more details. - prefix: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class HystrixSink(betterproto.Message): - """ - Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* - sink. The sink emits stats in `text/event-stream - `_ formatted stream for use by - `Hystrix dashboard `_. Note that only a single HystrixSink should be - configured. Streaming is started through an admin endpoint - :http:get:`/hystrix_event_stream`. [#extension: envoy.stat_sinks.hystrix] - """ - - # The number of buckets the rolling statistical window is divided into. Each - # time the sink is flushed, all relevant Envoy statistics are sampled and - # added to the rolling window (removing the oldest samples in the window in - # the process). The sink then outputs the aggregate statistics across the - # current rolling window to the event stream(s). rolling_window(ms) = - # stats_flush_interval(ms) * num_of_buckets More detailed explanation can be - # found in `Hystrix wiki `_. 
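The capture-group semantics documented for TagSpecifier above can be reproduced with plain re: the first group is the span stripped from the stat name, the second group becomes the tag value. This is only an illustration of the documented behaviour, not Envoy's actual extraction code.

import re

# Example 1 from the TagSpecifier comment: strip the cluster name out of the
# stat and surface it as the envoy.cluster_name tag value.
name = "cluster.foo_cluster.upstream_rq_timeout"
regex = re.compile(r"^cluster\.((.+?)\.)")

match = regex.search(name)
assert match is not None

# Group 1 is removed from the stat name; group 2 becomes the tag value.
extracted_name = name[: match.start(1)] + name[match.end(1):]
tag_value = match.group(2)

print(extracted_name)  # -> cluster.upstream_rq_timeout
print(tag_value)       # -> foo_cluster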
- num_buckets: int = betterproto.int64_field(1) - - -@dataclass(eq=False, repr=False) -class MetricsServiceConfig(betterproto.Message): - """ - Metrics Service is configured as a built-in - *envoy.stat_sinks.metrics_service* :ref:`StatsSink - `. This opaque configuration - will be used to create Metrics Service. [#extension: - envoy.stat_sinks.metrics_service] - """ - - # The upstream gRPC cluster that hosts the metrics service. - grpc_service: "___api_v2_core__.GrpcService" = betterproto.message_field(1) - - -from ....api.v2 import core as ___api_v2_core__ -from ....type import matcher as ___type_matcher__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/metrics/v3/__init__.py b/src/envoy_data_plane/envoy/config/metrics/v3/__init__.py deleted file mode 100644 index 227995d..0000000 --- a/src/envoy_data_plane/envoy/config/metrics/v3/__init__.py +++ /dev/null @@ -1,276 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/metrics/v3/metrics_service.proto, envoy/config/metrics/v3/stats.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class StatsSink(betterproto.Message): - """Configuration for pluggable stats sinks.""" - - # The name of the stats sink to instantiate. The name must match a supported - # stats sink. See the :ref:`extensions listed in typed_config below - # ` for the default list of available - # stats sink. Sinks optionally support tagged/multiple dimensional metrics. - name: str = betterproto.string_field(1) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - -@dataclass(eq=False, repr=False) -class StatsConfig(betterproto.Message): - """Statistics configuration such as tagging.""" - - # Each stat name is iteratively processed through these tag specifiers. When - # a tag is matched, the first capture group is removed from the name so later - # :ref:`TagSpecifiers ` - # cannot match that same portion of the match. - stats_tags: List["TagSpecifier"] = betterproto.message_field(1) - # Use all default tag regexes specified in Envoy. These can be combined with - # custom tags specified in :ref:`stats_tags - # `. They will - # be processed before the custom tags. .. note:: If any default tags are - # specified twice, the config will be considered invalid. See - # :repo:`well_known_names.h ` for a - # list of the default tags in Envoy. If not provided, the value is assumed to - # be true. - use_all_default_tags: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) - # Inclusion/exclusion matcher for stat name creation. If not provided, all - # stats are instantiated as normal. Preventing the instantiation of certain - # families of stats can improve memory performance for Envoys running - # especially large configs. .. warning:: Excluding stats may affect Envoy's - # behavior in undocumented ways. See `issue #8771 - # `_ for more information. - # If any unexpected behavior changes are observed, please open a new issue - # immediately. - stats_matcher: "StatsMatcher" = betterproto.message_field(3) - # Defines rules for setting the histogram buckets. Rules are evaluated in - # order, and the first match is applied. If no match is found (or if no rules - # are set), the following default buckets are used: .. 
code-block:: json - # [ 0.5, 1, 5, 10, 25, 50, 100, - # 250, 500, 1000, 2500, 5000, 10000, - # 30000, 60000, 300000, 600000, 1800000, - # 3600000 ] - histogram_bucket_settings: List[ - "HistogramBucketSettings" - ] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class StatsMatcher(betterproto.Message): - """Configuration for disabling stat instantiation.""" - - # If `reject_all` is true, then all stats are disabled. If `reject_all` is - # false, then all stats are enabled. - reject_all: bool = betterproto.bool_field(1, group="stats_matcher") - # Exclusive match. All stats are enabled except for those matching one of the - # supplied StringMatcher protos. - exclusion_list: "___type_matcher_v3__.ListStringMatcher" = ( - betterproto.message_field(2, group="stats_matcher") - ) - # Inclusive match. No stats are enabled except for those matching one of the - # supplied StringMatcher protos. - inclusion_list: "___type_matcher_v3__.ListStringMatcher" = ( - betterproto.message_field(3, group="stats_matcher") - ) - - -@dataclass(eq=False, repr=False) -class TagSpecifier(betterproto.Message): - """ - Designates a tag name and value pair. The value may be either a fixed value - or a regex providing the value via capture groups. The specified tag will - be unconditionally set if a fixed value, otherwise it will only be set if - one or more capture groups in the regex match. - """ - - # Attaches an identifier to the tag values to identify the tag being in the - # sink. Envoy has a set of default names and regexes to extract dynamic - # portions of existing stats, which can be found in :repo:`well_known_names.h - # ` in the Envoy repository. If a - # :ref:`tag_name - # ` is provided - # in the config and neither :ref:`regex - # ` or - # :ref:`fixed_value - # ` were - # specified, Envoy will attempt to find that name in its set of defaults and - # use the accompanying regex. .. note:: It is invalid to specify the same - # tag name twice in a config. - tag_name: str = betterproto.string_field(1) - # Designates a tag to strip from the tag extracted name and provide as a - # named tag value for all statistics. This will only occur if any part of the - # name matches the regex provided with one or more capture groups. The first - # capture group identifies the portion of the name to remove. The second - # capture group (which will normally be nested inside the first) will - # designate the value of the tag for the statistic. If no second capture - # group is provided, the first will also be used to set the value of the tag. - # All other capture groups will be ignored. Example 1. a stat name - # ``cluster.foo_cluster.upstream_rq_timeout`` and one tag specifier: .. code- - # block:: json { "tag_name": "envoy.cluster_name", "regex": - # "^cluster\\.((.+?)\\.)" } Note that the regex will remove - # ``foo_cluster.`` making the tag extracted name - # ``cluster.upstream_rq_timeout`` and the tag value for - # ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no - # ``.`` character because of the second capture group). Example 2. a stat - # name ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and - # two tag specifiers: .. code-block:: json [ { "tag_name": - # "envoy.http_user_agent", "regex": - # "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" }, { - # "tag_name": "envoy.http_conn_manager_prefix", "regex": - # "^http\\.((.*?)\\.)" } ] The two regexes of the specifiers will be - # processed in the definition order. 
The first regex will remove ``ios.``, - # leaving the tag extracted name - # ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag - # ``envoy.http_user_agent`` will be added with tag value ``ios``. The second - # regex will remove ``connection_manager_1.`` from the tag extracted name - # produced by the first regex - # ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving - # ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag - # ``envoy.http_conn_manager_prefix`` will be added with the tag value - # ``connection_manager_1``. - regex: str = betterproto.string_field(2, group="tag_value") - # Specifies a fixed tag value for the ``tag_name``. - fixed_value: str = betterproto.string_field(3, group="tag_value") - - -@dataclass(eq=False, repr=False) -class HistogramBucketSettings(betterproto.Message): - """ - Specifies a matcher for stats and the buckets that matching stats should - use. - """ - - # The stats that this rule applies to. The match is applied to the original - # stat name before tag-extraction, for example - # `cluster.exampleclustername.upstream_cx_length_ms`. - match: "___type_matcher_v3__.StringMatcher" = betterproto.message_field(1) - # Each value is the upper bound of a bucket. Each bucket must be greater than - # 0 and unique. The order of the buckets does not matter. - buckets: List[float] = betterproto.double_field(2) - - -@dataclass(eq=False, repr=False) -class StatsdSink(betterproto.Message): - """ - Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* - sink. This sink does not support tagged metrics. [#extension: - envoy.stat_sinks.statsd] - """ - - # The UDP address of a running `statsd `_ - # compliant listener. If specified, statistics will be flushed to this - # address. - address: "__core_v3__.Address" = betterproto.message_field( - 1, group="statsd_specifier" - ) - # The name of a cluster that is running a TCP `statsd - # `_ compliant listener. If specified, Envoy - # will connect to this cluster to flush statistics. - tcp_cluster_name: str = betterproto.string_field(2, group="statsd_specifier") - # Optional custom prefix for StatsdSink. If specified, this will override the - # default prefix. For example: .. code-block:: json { "prefix" : - # "envoy-prod" } will change emitted stats to .. code-block:: cpp envoy- - # prod.test_counter:1|c envoy-prod.test_timer:5|ms Note that the default - # prefix, "envoy", will be used if a prefix is not specified. Stats with - # default prefix: .. code-block:: cpp envoy.test_counter:1|c - # envoy.test_timer:5|ms - prefix: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class DogStatsdSink(betterproto.Message): - """ - Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* - sink. The sink emits stats with `DogStatsD - `_ compatible tags. Tags are - configurable via :ref:`StatsConfig - `. [#extension: - envoy.stat_sinks.dog_statsd] - """ - - # The UDP address of a running DogStatsD compliant listener. If specified, - # statistics will be flushed to this address. - address: "__core_v3__.Address" = betterproto.message_field( - 1, group="dog_statsd_specifier" - ) - # Optional custom metric name prefix. See :ref:`StatsdSink's prefix field - # ` for more details. - prefix: str = betterproto.string_field(3) - # Optional max datagram size to use when sending UDP messages. By default - # Envoy will emit one metric per datagram. 
By specifying a max-size larger - # than a single metric, Envoy will emit multiple, new-line separated metrics. - # The max datagram size should not exceed your network's MTU. Note that this - # value may not be respected if smaller than a single metric. - max_bytes_per_datagram: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT64 - ) - - -@dataclass(eq=False, repr=False) -class HystrixSink(betterproto.Message): - """ - Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* - sink. The sink emits stats in `text/event-stream - `_ formatted stream for use by - `Hystrix dashboard `_. Note that only a single HystrixSink should be - configured. Streaming is started through an admin endpoint - :http:get:`/hystrix_event_stream`. [#extension: envoy.stat_sinks.hystrix] - """ - - # The number of buckets the rolling statistical window is divided into. Each - # time the sink is flushed, all relevant Envoy statistics are sampled and - # added to the rolling window (removing the oldest samples in the window in - # the process). The sink then outputs the aggregate statistics across the - # current rolling window to the event stream(s). rolling_window(ms) = - # stats_flush_interval(ms) * num_of_buckets More detailed explanation can be - # found in `Hystrix wiki `_. - num_buckets: int = betterproto.int64_field(1) - - -@dataclass(eq=False, repr=False) -class MetricsServiceConfig(betterproto.Message): - """ - Metrics Service is configured as a built-in - *envoy.stat_sinks.metrics_service* :ref:`StatsSink - `. This opaque configuration - will be used to create Metrics Service. Example: .. code-block:: yaml - stats_sinks: - name: envoy.stat_sinks.metrics_service - typed_config: "@type": - type.googleapis.com/envoy.config.metrics.v3.MetricsServiceConfig - transport_api_version: V3 [#extension: envoy.stat_sinks.metrics_service] - """ - - # The upstream gRPC cluster that hosts the metrics service. - grpc_service: "__core_v3__.GrpcService" = betterproto.message_field(1) - # API version for metric service transport protocol. This describes the - # metric service gRPC endpoint and version of messages used on the wire. - transport_api_version: "__core_v3__.ApiVersion" = betterproto.enum_field(3) - # If true, counters are reported as the delta between flushing intervals. - # Otherwise, the current counter value is reported. Defaults to false. - # Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value - # is not set, the sink will take updates from the :ref:`MetricsResponse - # `. - report_counters_as_deltas: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) - # If true, metrics will have their tags emitted as labels on the metrics - # objects sent to the MetricsService, and the tag extracted name will be used - # instead of the full name, which may contain values used by the tag - # extractor or additional tags added during stats creation. 
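A Python-side equivalent of the YAML example in the MetricsServiceConfig docstring above might look like the sketch below. The GrpcServiceEnvoyGrpc class name follows the betterproto flattening convention used in these files, and the cluster name is a placeholder; both are assumptions for illustration.

from envoy_data_plane.envoy.config.core.v3 import (
    ApiVersion,
    GrpcService,
    GrpcServiceEnvoyGrpc,
)
from envoy_data_plane.envoy.config.metrics.v3 import MetricsServiceConfig

config = MetricsServiceConfig(
    grpc_service=GrpcService(
        envoy_grpc=GrpcServiceEnvoyGrpc(cluster_name="metrics_service_cluster")
    ),
    transport_api_version=ApiVersion.V3,
    report_counters_as_deltas=True,
)

# This message would then be packed into the typed_config Any of a StatsSink
# named "envoy.stat_sinks.metrics_service".
print(config.to_json())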
- emit_tags_as_labels: bool = betterproto.bool_field(4) - - -from ....type.matcher import v3 as ___type_matcher_v3__ -from ...core import v3 as __core_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/overload/__init__.py b/src/envoy_data_plane/envoy/config/overload/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/overload/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/overload/v2alpha/__init__.py deleted file mode 100644 index 64653a4..0000000 --- a/src/envoy_data_plane/envoy/config/overload/v2alpha/__init__.py +++ /dev/null @@ -1,71 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/overload/v2alpha/overload.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ResourceMonitor(betterproto.Message): - # The name of the resource monitor to instantiate. Must match a registered - # resource monitor type. The built-in resource monitors are: * - # :ref:`envoy.resource_monitors.fixed_heap - # ` - # * :ref:`envoy.resource_monitors.injected_resource ` - name: str = betterproto.string_field(1) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn("ResourceMonitor.config is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class ThresholdTrigger(betterproto.Message): - # If the resource pressure is greater than or equal to this value, the - # trigger will fire. - value: float = betterproto.double_field(1) - - -@dataclass(eq=False, repr=False) -class Trigger(betterproto.Message): - # The name of the resource this is a trigger for. - name: str = betterproto.string_field(1) - threshold: "ThresholdTrigger" = betterproto.message_field(2, group="trigger_oneof") - - -@dataclass(eq=False, repr=False) -class OverloadAction(betterproto.Message): - # The name of the overload action. This is just a well-known string that - # listeners can use for registering callbacks. Custom overload actions should - # be named using reverse DNS to ensure uniqueness. - name: str = betterproto.string_field(1) - # A set of triggers for this action. If any of these triggers fire the - # overload action is activated. Listeners are notified when the overload - # action transitions from inactivated to activated, or vice versa. - triggers: List["Trigger"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class OverloadManager(betterproto.Message): - # The interval for refreshing resource usage. - refresh_interval: timedelta = betterproto.message_field(1) - # The set of resources to monitor. - resource_monitors: List["ResourceMonitor"] = betterproto.message_field(2) - # The set of overload actions. 
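The v2alpha overload dataclasses above wire together as in the sketch below: one resource monitor and one action whose trigger fires at 95% pressure. The action name and the empty typed_config payload are placeholders for illustration.

from datetime import timedelta

import betterproto.lib.google.protobuf as pb
from envoy_data_plane.envoy.config.overload.v2alpha import (
    OverloadAction,
    OverloadManager,
    ResourceMonitor,
    ThresholdTrigger,
    Trigger,
)

manager = OverloadManager(
    refresh_interval=timedelta(seconds=1),
    resource_monitors=[
        ResourceMonitor(
            name="envoy.resource_monitors.fixed_heap",
            typed_config=pb.Any(),  # would carry a FixedHeapConfig in practice
        )
    ],
    actions=[
        OverloadAction(
            name="envoy.overload_actions.stop_accepting_requests",  # example action name
            triggers=[
                Trigger(
                    name="envoy.resource_monitors.fixed_heap",
                    threshold=ThresholdTrigger(value=0.95),
                )
            ],
        )
    ],
)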
- actions: List["OverloadAction"] = betterproto.message_field(3) - - -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/overload/v3/__init__.py b/src/envoy_data_plane/envoy/config/overload/v3/__init__.py deleted file mode 100644 index 465a355..0000000 --- a/src/envoy_data_plane/envoy/config/overload/v3/__init__.py +++ /dev/null @@ -1,132 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/overload/v3/overload.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class ScaleTimersOverloadActionConfigTimerType(betterproto.Enum): - UNSPECIFIED = 0 - HTTP_DOWNSTREAM_CONNECTION_IDLE = 1 - HTTP_DOWNSTREAM_STREAM_IDLE = 2 - TRANSPORT_SOCKET_CONNECT = 3 - - -@dataclass(eq=False, repr=False) -class ResourceMonitor(betterproto.Message): - # The name of the resource monitor to instantiate. Must match a registered - # resource monitor type. See the :ref:`extensions listed in typed_config - # below ` for the default list of - # available resource monitor. - name: str = betterproto.string_field(1) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - -@dataclass(eq=False, repr=False) -class ThresholdTrigger(betterproto.Message): - # If the resource pressure is greater than or equal to this value, the - # trigger will enter saturation. - value: float = betterproto.double_field(1) - - -@dataclass(eq=False, repr=False) -class ScaledTrigger(betterproto.Message): - # If the resource pressure is greater than this value, the trigger will be in - # the :ref:`scaling ` state - # with value `(pressure - scaling_threshold) / (saturation_threshold - - # scaling_threshold)`. - scaling_threshold: float = betterproto.double_field(1) - # If the resource pressure is greater than this value, the trigger will enter - # saturation. - saturation_threshold: float = betterproto.double_field(2) - - -@dataclass(eq=False, repr=False) -class Trigger(betterproto.Message): - # The name of the resource this is a trigger for. - name: str = betterproto.string_field(1) - threshold: "ThresholdTrigger" = betterproto.message_field(2, group="trigger_oneof") - scaled: "ScaledTrigger" = betterproto.message_field(3, group="trigger_oneof") - - -@dataclass(eq=False, repr=False) -class ScaleTimersOverloadActionConfig(betterproto.Message): - """ - Typed configuration for the "envoy.overload_actions.reduce_timeouts" - action. See :ref:`the docs ` for - an example of how to configure the action with different timeouts and - minimum values. - """ - - # A set of timer scaling rules to be applied. - timer_scale_factors: List[ - "ScaleTimersOverloadActionConfigScaleTimer" - ] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ScaleTimersOverloadActionConfigScaleTimer(betterproto.Message): - # The type of timer this minimum applies to. - timer: "ScaleTimersOverloadActionConfigTimerType" = betterproto.enum_field(1) - # Sets the minimum duration as an absolute value. - min_timeout: timedelta = betterproto.message_field(2, group="overload_adjust") - # Sets the minimum duration as a percentage of the maximum value. 
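The scaled-trigger formula documented above, `(pressure - scaling_threshold) / (saturation_threshold - scaling_threshold)`, works out as in this small illustration; the function is a restatement of the documented rule, not Envoy's implementation.

def scaled_trigger_state(pressure: float, scaling: float, saturation: float) -> float:
    """Return 0.0 below `scaling`, 1.0 at/above `saturation`, linear in between."""
    if pressure <= scaling:
        return 0.0
    if pressure >= saturation:
        return 1.0
    return (pressure - scaling) / (saturation - scaling)

# With scaling_threshold=0.8 and saturation_threshold=0.95, 89% pressure puts
# the trigger 60% of the way to saturation.
print(scaled_trigger_state(0.89, 0.8, 0.95))  # -> ~0.6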
- min_scale: "___type_v3__.Percent" = betterproto.message_field( - 3, group="overload_adjust" - ) - - -@dataclass(eq=False, repr=False) -class OverloadAction(betterproto.Message): - # The name of the overload action. This is just a well-known string that - # listeners can use for registering callbacks. Custom overload actions should - # be named using reverse DNS to ensure uniqueness. - name: str = betterproto.string_field(1) - # A set of triggers for this action. The state of the action is the maximum - # state of all triggers, which can be scaling between 0 and 1 or saturated. - # Listeners are notified when the overload action changes state. - triggers: List["Trigger"] = betterproto.message_field(2) - # Configuration for the action being instantiated. - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class BufferFactoryConfig(betterproto.Message): - """ - Configuration for which accounts the WatermarkBuffer Factories should - track. - """ - - # The minimum power of two at which Envoy starts tracking an account. Envoy - # has 8 power of two buckets starting with the provided exponent below. - # Concretely the 1st bucket contains accounts for streams that use - # [2^minimum_account_to_track_power_of_two, - # 2^(minimum_account_to_track_power_of_two + 1)) bytes. With the 8th bucket - # tracking accounts >= 128 * 2^minimum_account_to_track_power_of_two. The - # maximum value is 56, since we're using uint64_t for bytes counting, and - # that's the last value that would use the 8 buckets. In practice, we don't - # expect the proxy to be holding 2^56 bytes. If omitted, Envoy should not do - # any tracking. - minimum_account_to_track_power_of_two: int = betterproto.uint32_field(1) - - -@dataclass(eq=False, repr=False) -class OverloadManager(betterproto.Message): - # The interval for refreshing resource usage. - refresh_interval: timedelta = betterproto.message_field(1) - # The set of resources to monitor. - resource_monitors: List["ResourceMonitor"] = betterproto.message_field(2) - # The set of overload actions. - actions: List["OverloadAction"] = betterproto.message_field(3) - # Configuration for buffer factory. - buffer_factory_config: "BufferFactoryConfig" = betterproto.message_field(4) - - -from ....type import v3 as ___type_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/ratelimit/__init__.py b/src/envoy_data_plane/envoy/config/ratelimit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/ratelimit/v2/__init__.py b/src/envoy_data_plane/envoy/config/ratelimit/v2/__init__.py deleted file mode 100644 index 93dac7a..0000000 --- a/src/envoy_data_plane/envoy/config/ratelimit/v2/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/ratelimit/v2/rls.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RateLimitServiceConfig(betterproto.Message): - """ - Rate limit :ref:`configuration overview `. - """ - - # Specifies the gRPC service that hosts the rate limit service. The client - # will connect to this cluster when it needs to make rate limit service - # requests. 
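The bucket layout described for BufferFactoryConfig above (eight power-of-two buckets starting at the configured exponent, the last one open-ended at 128 * 2^exponent) can be made concrete with a few lines of arithmetic; this is a worked illustration of the comment, nothing more.

def account_buckets(minimum_power_of_two: int) -> list:
    base = 2 ** minimum_power_of_two
    # Buckets 1-7 cover [2^(min+i), 2^(min+i+1)); bucket 8 is open-ended.
    buckets = [(base * 2 ** i, base * 2 ** (i + 1)) for i in range(7)]
    buckets.append((base * 128, None))  # 8th bucket: accounts >= 128 * 2^min
    return buckets

for lower, upper in account_buckets(20):  # exponent 20 -> first bucket starts at 1 MiB
    print(lower, upper)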
- grpc_service: "___api_v2_core__.GrpcService" = betterproto.message_field(2) - - -from ....api.v2 import core as ___api_v2_core__ diff --git a/src/envoy_data_plane/envoy/config/ratelimit/v3/__init__.py b/src/envoy_data_plane/envoy/config/ratelimit/v3/__init__.py deleted file mode 100644 index 52d63ec..0000000 --- a/src/envoy_data_plane/envoy/config/ratelimit/v3/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/ratelimit/v3/rls.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RateLimitServiceConfig(betterproto.Message): - """ - Rate limit :ref:`configuration overview `. - """ - - # Specifies the gRPC service that hosts the rate limit service. The client - # will connect to this cluster when it needs to make rate limit service - # requests. - grpc_service: "__core_v3__.GrpcService" = betterproto.message_field(2) - # API version for rate limit transport protocol. This describes the rate - # limit gRPC endpoint and version of messages used on the wire. - transport_api_version: "__core_v3__.ApiVersion" = betterproto.enum_field(4) - - -from ...core import v3 as __core_v3__ diff --git a/src/envoy_data_plane/envoy/config/rbac/__init__.py b/src/envoy_data_plane/envoy/config/rbac/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/rbac/v2/__init__.py b/src/envoy_data_plane/envoy/config/rbac/v2/__init__.py deleted file mode 100644 index 0549f0d..0000000 --- a/src/envoy_data_plane/envoy/config/rbac/v2/__init__.py +++ /dev/null @@ -1,227 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/rbac/v2/rbac.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from typing import Dict, List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class RbacAction(betterproto.Enum): - ALLOW = 0 - DENY = 1 - - -@dataclass(eq=False, repr=False) -class Rbac(betterproto.Message): - """ - Role Based Access Control (RBAC) provides service-level and method-level - access control for a service. RBAC policies are additive. The policies are - examined in order. A request is allowed once a matching policy is found - (suppose the `action` is ALLOW). Here is an example of RBAC configuration. - It has two policies: * Service account "cluster.local/ns/default/sa/admin" - has full access to the service, and so does - "cluster.local/ns/default/sa/superuser". * Any user can read ("GET") the - service at paths with prefix "/products", so long as the destination port - is either 80 or 443. .. code-block:: yaml action: ALLOW policies: - "service-admin": permissions: - any: true principals: - - authenticated: principal_name: exact: - "cluster.local/ns/default/sa/admin" - authenticated: - principal_name: exact: - "cluster.local/ns/default/sa/superuser" "product-viewer": - permissions: - and_rules: rules: - - header: { name: ":method", exact_match: "GET" } - url_path: - path: { prefix: "/products" } - or_rules: - rules: - destination_port: 80 - - destination_port: 443 principals: - any: true - """ - - # The action to take if a policy matches. 
The request is allowed if and only - # if: * `action` is "ALLOWED" and at least one policy matches * `action` - # is "DENY" and none of the policies match - action: "RbacAction" = betterproto.enum_field(1) - # Maps from policy name to policy. A match occurs when at least one policy - # matches the request. - policies: Dict[str, "Policy"] = betterproto.map_field( - 2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - - -@dataclass(eq=False, repr=False) -class Policy(betterproto.Message): - """ - Policy specifies a role and the principals that are assigned/denied the - role. A policy matches if and only if at least one of its permissions match - the action taking place AND at least one of its principals match the - downstream AND the condition is true if specified. - """ - - # Required. The set of permissions that define a role. Each permission is - # matched with OR semantics. To match all actions for this policy, a single - # Permission with the `any` field set to true should be used. - permissions: List["Permission"] = betterproto.message_field(1) - # Required. The set of principals that are assigned/denied the role based on - # “action”. Each principal is matched with OR semantics. To match all - # downstreams for this policy, a single Principal with the `any` field set to - # true should be used. - principals: List["Principal"] = betterproto.message_field(2) - # An optional symbolic expression specifying an access control - # :ref:`condition `. The condition is combined with - # the permissions and the principals as a clause with AND semantics. - condition: "____google_api_expr_v1_alpha1__.Expr" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class Permission(betterproto.Message): - """ - Permission defines an action (or actions) that a principal can take. - [#next-free-field: 11] - """ - - # A set of rules that all must match in order to define the action. - and_rules: "PermissionSet" = betterproto.message_field(1, group="rule") - # A set of rules where at least one must match in order to define the action. - or_rules: "PermissionSet" = betterproto.message_field(2, group="rule") - # When any is set, it matches any action. - any: bool = betterproto.bool_field(3, group="rule") - # A header (or pseudo-header such as :path or :method) on the incoming HTTP - # request. Only available for HTTP request. Note: the pseudo-header :path - # includes the query and fragment string. Use the `url_path` field if you - # want to match the URL path without the query and fragment string. - header: "___api_v2_route__.HeaderMatcher" = betterproto.message_field( - 4, group="rule" - ) - # A URL path on the incoming HTTP request. Only available for HTTP. - url_path: "___type_matcher__.PathMatcher" = betterproto.message_field( - 10, group="rule" - ) - # A CIDR block that describes the destination IP. - destination_ip: "___api_v2_core__.CidrRange" = betterproto.message_field( - 5, group="rule" - ) - # A port number that describes the destination port connecting to. - destination_port: int = betterproto.uint32_field(6, group="rule") - # Metadata that describes additional information about the action. - metadata: "___type_matcher__.MetadataMatcher" = betterproto.message_field( - 7, group="rule" - ) - # Negates matching the provided permission. For instance, if the value of - # `not_rule` would match, this permission would not match. Conversely, if the - # value of `not_rule` would not match, this permission would match. 
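The allow/deny rule spelled out in the `action` comment above reduces to a two-line decision function: with ALLOW a request is admitted only if some policy matches, with DENY only if none do. This toy restatement illustrates the documented semantics, not Envoy's RBAC engine.

def rbac_allows(action: str, any_policy_matched: bool) -> bool:
    if action == "ALLOW":
        return any_policy_matched
    if action == "DENY":
        return not any_policy_matched
    raise ValueError(f"unknown action: {action}")

assert rbac_allows("ALLOW", any_policy_matched=True)
assert not rbac_allows("ALLOW", any_policy_matched=False)
assert rbac_allows("DENY", any_policy_matched=False)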
- not_rule: "Permission" = betterproto.message_field(8, group="rule") - # The request server from the client's connection request. This is typically - # TLS SNI. .. attention:: The behavior of this field may be affected by how - # Envoy is configured as explained below. * If the :ref:`TLS Inspector - # ` filter is not added, and if a - # `FilterChainMatch` is not defined for the :ref:`server name - # `, a TLS - # connection's requested SNI server name will be treated as if it wasn't - # present. * A :ref:`listener filter ` may - # overwrite a connection's requested server name within Envoy. Please refer - # to :ref:`this FAQ entry ` to learn to setup SNI. - requested_server_name: "___type_matcher__.StringMatcher" = ( - betterproto.message_field(9, group="rule") - ) - - -@dataclass(eq=False, repr=False) -class PermissionSet(betterproto.Message): - """ - Used in the `and_rules` and `or_rules` fields in the `rule` oneof. - Depending on the context, each are applied with the associated behavior. - """ - - rules: List["Permission"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class Principal(betterproto.Message): - """ - Principal defines an identity or a group of identities for a downstream - subject. [#next-free-field: 12] - """ - - # A set of identifiers that all must match in order to define the downstream. - and_ids: "PrincipalSet" = betterproto.message_field(1, group="identifier") - # A set of identifiers at least one must match in order to define the - # downstream. - or_ids: "PrincipalSet" = betterproto.message_field(2, group="identifier") - # When any is set, it matches any downstream. - any: bool = betterproto.bool_field(3, group="identifier") - # Authenticated attributes that identify the downstream. - authenticated: "PrincipalAuthenticated" = betterproto.message_field( - 4, group="identifier" - ) - # A CIDR block that describes the downstream IP. This address will honor - # proxy protocol, but will not honor XFF. - source_ip: "___api_v2_core__.CidrRange" = betterproto.message_field( - 5, group="identifier" - ) - # A CIDR block that describes the downstream remote/origin address. Note: - # This is always the physical peer even if the :ref:`remote_ip - # ` is inferred from for - # example the x-forwarder-for header, proxy protocol, etc. - direct_remote_ip: "___api_v2_core__.CidrRange" = betterproto.message_field( - 10, group="identifier" - ) - # A CIDR block that describes the downstream remote/origin address. Note: - # This may not be the physical peer and could be different from the - # :ref:`direct_remote_ip - # `. E.g, if the - # remote ip is inferred from for example the x-forwarder-for header, proxy - # protocol, etc. - remote_ip: "___api_v2_core__.CidrRange" = betterproto.message_field( - 11, group="identifier" - ) - # A header (or pseudo-header such as :path or :method) on the incoming HTTP - # request. Only available for HTTP request. Note: the pseudo-header :path - # includes the query and fragment string. Use the `url_path` field if you - # want to match the URL path without the query and fragment string. - header: "___api_v2_route__.HeaderMatcher" = betterproto.message_field( - 6, group="identifier" - ) - # A URL path on the incoming HTTP request. Only available for HTTP. - url_path: "___type_matcher__.PathMatcher" = betterproto.message_field( - 9, group="identifier" - ) - # Metadata that describes additional information about the principal. 
- metadata: "___type_matcher__.MetadataMatcher" = betterproto.message_field( - 7, group="identifier" - ) - # Negates matching the provided principal. For instance, if the value of - # `not_id` would match, this principal would not match. Conversely, if the - # value of `not_id` would not match, this principal would match. - not_id: "Principal" = betterproto.message_field(8, group="identifier") - - def __post_init__(self) -> None: - super().__post_init__() - if self.source_ip: - warnings.warn("Principal.source_ip is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class PrincipalSet(betterproto.Message): - """ - Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. - Depending on the context, each are applied with the associated behavior. - """ - - ids: List["Principal"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class PrincipalAuthenticated(betterproto.Message): - """Authentication attributes for a downstream.""" - - # The name of the principal. If set, The URI SAN or DNS SAN in that order is - # used from the certificate, otherwise the subject field is used. If unset, - # it applies to any user that is authenticated. - principal_name: "___type_matcher__.StringMatcher" = betterproto.message_field(2) - - -from .....google.api.expr import v1alpha1 as ____google_api_expr_v1_alpha1__ -from ....api.v2 import core as ___api_v2_core__ -from ....api.v2 import route as ___api_v2_route__ -from ....type import matcher as ___type_matcher__ diff --git a/src/envoy_data_plane/envoy/config/rbac/v3/__init__.py b/src/envoy_data_plane/envoy/config/rbac/v3/__init__.py deleted file mode 100644 index b9fccae..0000000 --- a/src/envoy_data_plane/envoy/config/rbac/v3/__init__.py +++ /dev/null @@ -1,256 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/rbac/v3/rbac.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from typing import Dict, List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class RbacAction(betterproto.Enum): - ALLOW = 0 - DENY = 1 - LOG = 2 - - -@dataclass(eq=False, repr=False) -class Rbac(betterproto.Message): - """ - Role Based Access Control (RBAC) provides service-level and method-level - access control for a service. Requests are allowed or denied based on the - `action` and whether a matching policy is found. For instance, if the - action is ALLOW and a matching policy is found the request should be - allowed. RBAC can also be used to make access logging decisions by - communicating with access loggers through dynamic metadata. When the action - is LOG and at least one policy matches, the `access_log_hint` value in the - shared key namespace 'envoy.common' is set to `true` indicating the request - should be logged. Here is an example of RBAC configuration. It has two - policies: * Service account "cluster.local/ns/default/sa/admin" has full - access to the service, and so does - "cluster.local/ns/default/sa/superuser". * Any user can read ("GET") the - service at paths with prefix "/products", so long as the destination port - is either 80 or 443. .. 
code-block:: yaml action: ALLOW policies: - "service-admin": permissions: - any: true principals: - - authenticated: principal_name: exact: - "cluster.local/ns/default/sa/admin" - authenticated: - principal_name: exact: - "cluster.local/ns/default/sa/superuser" "product-viewer": - permissions: - and_rules: rules: - - header: name: ":method" - string_match: exact: "GET" - - url_path: path: { prefix: "/products" } - - or_rules: rules: - - destination_port: 80 - destination_port: 443 - principals: - any: true - """ - - # The action to take if a policy matches. Every action either allows or - # denies a request, and can also carry out action-specific operations. - # Actions: * ALLOW: Allows the request if and only if there is a policy that - # matches the request. * DENY: Allows the request if and only if there - # are no policies that match the request. * LOG: Allows all requests. If - # at least one policy matches, the dynamic metadata key `access_log_hint` - # is set to the value `true` under the shared key namespace - # 'envoy.common'. If no policies match, it is set to `false`. Other - # actions do not modify this key. - action: "RbacAction" = betterproto.enum_field(1) - # Maps from policy name to policy. A match occurs when at least one policy - # matches the request. The policies are evaluated in lexicographic order of - # the policy name. - policies: Dict[str, "Policy"] = betterproto.map_field( - 2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - - -@dataclass(eq=False, repr=False) -class Policy(betterproto.Message): - """ - Policy specifies a role and the principals that are assigned/denied the - role. A policy matches if and only if at least one of its permissions match - the action taking place AND at least one of its principals match the - downstream AND the condition is true if specified. - """ - - # Required. The set of permissions that define a role. Each permission is - # matched with OR semantics. To match all actions for this policy, a single - # Permission with the `any` field set to true should be used. - permissions: List["Permission"] = betterproto.message_field(1) - # Required. The set of principals that are assigned/denied the role based on - # “action”. Each principal is matched with OR semantics. To match all - # downstreams for this policy, a single Principal with the `any` field set to - # true should be used. - principals: List["Principal"] = betterproto.message_field(2) - # An optional symbolic expression specifying an access control - # :ref:`condition `. The condition is combined with - # the permissions and the principals as a clause with AND semantics. Only be - # used when checked_condition is not used. - condition: "____google_api_expr_v1_alpha1__.Expr" = betterproto.message_field(3) - # [#not-implemented-hide:] An optional symbolic expression that has been - # successfully type checked. Only be used when condition is not used. - checked_condition: "____google_api_expr_v1_alpha1__.CheckedExpr" = ( - betterproto.message_field(4) - ) - - -@dataclass(eq=False, repr=False) -class Permission(betterproto.Message): - """ - Permission defines an action (or actions) that a principal can take. - [#next-free-field: 13] - """ - - # A set of rules that all must match in order to define the action. - and_rules: "PermissionSet" = betterproto.message_field(1, group="rule") - # A set of rules where at least one must match in order to define the action. - or_rules: "PermissionSet" = betterproto.message_field(2, group="rule") - # When any is set, it matches any action. 
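The "product-viewer" policy from the YAML example above could be expressed with the generated v3 dataclasses roughly as follows. The import paths and the HeaderMatcher/StringMatcher/PathMatcher field names are assumptions based on the references in this file, offered only as a sketch of how the nested Permission oneofs compose.

from envoy_data_plane.envoy.config.rbac.v3 import (
    Permission,
    PermissionSet,
    Policy,
    Principal,
)
from envoy_data_plane.envoy.config.route.v3 import HeaderMatcher
from envoy_data_plane.envoy.type.matcher.v3 import PathMatcher, StringMatcher

product_viewer = Policy(
    permissions=[
        Permission(
            and_rules=PermissionSet(
                rules=[
                    # GET requests only.
                    Permission(
                        header=HeaderMatcher(
                            name=":method",
                            string_match=StringMatcher(exact="GET"),
                        )
                    ),
                    # Paths under /products.
                    Permission(
                        url_path=PathMatcher(path=StringMatcher(prefix="/products"))
                    ),
                    # Destination port 80 or 443.
                    Permission(
                        or_rules=PermissionSet(
                            rules=[
                                Permission(destination_port=80),
                                Permission(destination_port=443),
                            ]
                        )
                    ),
                ]
            )
        )
    ],
    principals=[Principal(any=True)],
)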
- any: bool = betterproto.bool_field(3, group="rule") - # A header (or pseudo-header such as :path or :method) on the incoming HTTP - # request. Only available for HTTP request. Note: the pseudo-header :path - # includes the query and fragment string. Use the `url_path` field if you - # want to match the URL path without the query and fragment string. - header: "__route_v3__.HeaderMatcher" = betterproto.message_field(4, group="rule") - # A URL path on the incoming HTTP request. Only available for HTTP. - url_path: "___type_matcher_v3__.PathMatcher" = betterproto.message_field( - 10, group="rule" - ) - # A CIDR block that describes the destination IP. - destination_ip: "__core_v3__.CidrRange" = betterproto.message_field(5, group="rule") - # A port number that describes the destination port connecting to. - destination_port: int = betterproto.uint32_field(6, group="rule") - # A port number range that describes a range of destination ports connecting - # to. - destination_port_range: "___type_v3__.Int32Range" = betterproto.message_field( - 11, group="rule" - ) - # Metadata that describes additional information about the action. - metadata: "___type_matcher_v3__.MetadataMatcher" = betterproto.message_field( - 7, group="rule" - ) - # Negates matching the provided permission. For instance, if the value of - # `not_rule` would match, this permission would not match. Conversely, if the - # value of `not_rule` would not match, this permission would match. - not_rule: "Permission" = betterproto.message_field(8, group="rule") - # The request server from the client's connection request. This is typically - # TLS SNI. .. attention:: The behavior of this field may be affected by how - # Envoy is configured as explained below. * If the :ref:`TLS Inspector - # ` filter is not added, and if a - # `FilterChainMatch` is not defined for the :ref:`server name - # `, - # a TLS connection's requested SNI server name will be treated as if it - # wasn't present. * A :ref:`listener filter - # ` may overwrite a connection's - # requested server name within Envoy. Please refer to :ref:`this FAQ entry - # ` to learn to setup SNI. - requested_server_name: "___type_matcher_v3__.StringMatcher" = ( - betterproto.message_field(9, group="rule") - ) - # Extension for configuring custom matchers for RBAC. [#extension-category: - # envoy.rbac.matchers] - matcher: "__core_v3__.TypedExtensionConfig" = betterproto.message_field( - 12, group="rule" - ) - - -@dataclass(eq=False, repr=False) -class PermissionSet(betterproto.Message): - """ - Used in the `and_rules` and `or_rules` fields in the `rule` oneof. - Depending on the context, each are applied with the associated behavior. - """ - - rules: List["Permission"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class Principal(betterproto.Message): - """ - Principal defines an identity or a group of identities for a downstream - subject. [#next-free-field: 12] - """ - - # A set of identifiers that all must match in order to define the downstream. - and_ids: "PrincipalSet" = betterproto.message_field(1, group="identifier") - # A set of identifiers at least one must match in order to define the - # downstream. - or_ids: "PrincipalSet" = betterproto.message_field(2, group="identifier") - # When any is set, it matches any downstream. - any: bool = betterproto.bool_field(3, group="identifier") - # Authenticated attributes that identify the downstream. 
- authenticated: "PrincipalAuthenticated" = betterproto.message_field( - 4, group="identifier" - ) - # A CIDR block that describes the downstream IP. This address will honor - # proxy protocol, but will not honor XFF. - source_ip: "__core_v3__.CidrRange" = betterproto.message_field( - 5, group="identifier" - ) - # A CIDR block that describes the downstream remote/origin address. Note: - # This is always the physical peer even if the :ref:`remote_ip - # ` is inferred from - # for example the x-forwarder-for header, proxy protocol, etc. - direct_remote_ip: "__core_v3__.CidrRange" = betterproto.message_field( - 10, group="identifier" - ) - # A CIDR block that describes the downstream remote/origin address. Note: - # This may not be the physical peer and could be different from the - # :ref:`direct_remote_ip - # `. E.g, if - # the remote ip is inferred from for example the x-forwarder-for header, - # proxy protocol, etc. - remote_ip: "__core_v3__.CidrRange" = betterproto.message_field( - 11, group="identifier" - ) - # A header (or pseudo-header such as :path or :method) on the incoming HTTP - # request. Only available for HTTP request. Note: the pseudo-header :path - # includes the query and fragment string. Use the `url_path` field if you - # want to match the URL path without the query and fragment string. - header: "__route_v3__.HeaderMatcher" = betterproto.message_field( - 6, group="identifier" - ) - # A URL path on the incoming HTTP request. Only available for HTTP. - url_path: "___type_matcher_v3__.PathMatcher" = betterproto.message_field( - 9, group="identifier" - ) - # Metadata that describes additional information about the principal. - metadata: "___type_matcher_v3__.MetadataMatcher" = betterproto.message_field( - 7, group="identifier" - ) - # Negates matching the provided principal. For instance, if the value of - # `not_id` would match, this principal would not match. Conversely, if the - # value of `not_id` would not match, this principal would match. - not_id: "Principal" = betterproto.message_field(8, group="identifier") - - def __post_init__(self) -> None: - super().__post_init__() - if self.source_ip: - warnings.warn("Principal.source_ip is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class PrincipalSet(betterproto.Message): - """ - Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. - Depending on the context, each are applied with the associated behavior. - """ - - ids: List["Principal"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class PrincipalAuthenticated(betterproto.Message): - """Authentication attributes for a downstream.""" - - # The name of the principal. If set, The URI SAN or DNS SAN in that order is - # used from the certificate, otherwise the subject field is used. If unset, - # it applies to any user that is authenticated. 
- principal_name: "___type_matcher_v3__.StringMatcher" = betterproto.message_field(2) - - -from .....google.api.expr import v1alpha1 as ____google_api_expr_v1_alpha1__ -from ....type import v3 as ___type_v3__ -from ....type.matcher import v3 as ___type_matcher_v3__ -from ...core import v3 as __core_v3__ -from ...route import v3 as __route_v3__ diff --git a/src/envoy_data_plane/envoy/config/resource_monitor/__init__.py b/src/envoy_data_plane/envoy/config/resource_monitor/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/resource_monitor/fixed_heap/__init__.py b/src/envoy_data_plane/envoy/config/resource_monitor/fixed_heap/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/resource_monitor/fixed_heap/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/resource_monitor/fixed_heap/v2alpha/__init__.py deleted file mode 100644 index 639d07a..0000000 --- a/src/envoy_data_plane/envoy/config/resource_monitor/fixed_heap/v2alpha/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FixedHeapConfig(betterproto.Message): - """ - The fixed heap resource monitor reports the Envoy process memory pressure, - computed as a fraction of currently reserved heap memory divided by a - statically configured maximum specified in the FixedHeapConfig. - """ - - max_heap_size_bytes: int = betterproto.uint64_field(1) diff --git a/src/envoy_data_plane/envoy/config/resource_monitor/injected_resource/__init__.py b/src/envoy_data_plane/envoy/config/resource_monitor/injected_resource/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/resource_monitor/injected_resource/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/resource_monitor/injected_resource/v2alpha/__init__.py deleted file mode 100644 index 17bcc3f..0000000 --- a/src/envoy_data_plane/envoy/config/resource_monitor/injected_resource/v2alpha/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class InjectedResourceConfig(betterproto.Message): - """ - The injected resource monitor allows injecting a synthetic resource - pressure into Envoy via a text file, which must contain a floating-point - number in the range [0..1] representing the resource pressure and be - updated atomically by a symbolic link swap. This is intended primarily for - integration tests to force Envoy into an overloaded state. 
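The injected resource monitor described above reads a pressure value in [0..1] from a file that is swapped atomically via a symbolic link. A test harness might produce such an update as sketched below; the path is an example only, and cleanup of superseded files is left out for brevity.

import os
import tempfile

def write_pressure(link_path: str, pressure: float) -> None:
    """Write `pressure` to a fresh file and atomically repoint `link_path` at it."""
    directory = os.path.dirname(link_path) or "."
    fd, real_path = tempfile.mkstemp(dir=directory)
    with os.fdopen(fd, "w") as f:
        f.write(f"{pressure}\n")
    tmp_link = real_path + ".link"
    os.symlink(real_path, tmp_link)
    os.replace(tmp_link, link_path)  # atomic swap of the symlink

write_pressure("/tmp/envoy_injected_resource", 0.9)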
- """ - - filename: str = betterproto.string_field(1) diff --git a/src/envoy_data_plane/envoy/config/retry/__init__.py b/src/envoy_data_plane/envoy/config/retry/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/retry/omit_canary_hosts/__init__.py b/src/envoy_data_plane/envoy/config/retry/omit_canary_hosts/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/retry/omit_canary_hosts/v2/__init__.py b/src/envoy_data_plane/envoy/config/retry/omit_canary_hosts/v2/__init__.py deleted file mode 100644 index 09c0f31..0000000 --- a/src/envoy_data_plane/envoy/config/retry/omit_canary_hosts/v2/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class OmitCanaryHostsPredicate(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/config/retry/omit_host_metadata/__init__.py b/src/envoy_data_plane/envoy/config/retry/omit_host_metadata/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/retry/omit_host_metadata/v2/__init__.py b/src/envoy_data_plane/envoy/config/retry/omit_host_metadata/v2/__init__.py deleted file mode 100644 index cc57449..0000000 --- a/src/envoy_data_plane/envoy/config/retry/omit_host_metadata/v2/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class OmitHostMetadataConfig(betterproto.Message): - """ - A retry host predicate that can be used to reject a host based on - predefined metadata match criteria. [#extension: - envoy.retry_host_predicates.omit_host_metadata] - """ - - # Retry host predicate metadata match criteria. The hosts in the upstream - # cluster with matching metadata will be omitted while attempting a retry of - # a failed request. The metadata should be specified under the *envoy.lb* - # key. - metadata_match: "____api_v2_core__.Metadata" = betterproto.message_field(1) - - -from .....api.v2 import core as ____api_v2_core__ diff --git a/src/envoy_data_plane/envoy/config/retry/previous_hosts/__init__.py b/src/envoy_data_plane/envoy/config/retry/previous_hosts/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/retry/previous_hosts/v2/__init__.py b/src/envoy_data_plane/envoy/config/retry/previous_hosts/v2/__init__.py deleted file mode 100644 index 4b93272..0000000 --- a/src/envoy_data_plane/envoy/config/retry/previous_hosts/v2/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/config/retry/previous_hosts/v2/previous_hosts.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class PreviousHostsPredicate(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/config/retry/previous_priorities/__init__.py b/src/envoy_data_plane/envoy/config/retry/previous_priorities/__init__.py deleted file mode 100644 index 3fa9ab5..0000000 --- a/src/envoy_data_plane/envoy/config/retry/previous_priorities/__init__.py +++ /dev/null @@ -1,46 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/retry/previous_priorities/previous_priorities_config.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class PreviousPrioritiesConfig(betterproto.Message): - """ - A retry host selector that attempts to spread retries between priorities, - even if certain priorities would not normally be attempted due to higher - priorities being available. As priorities get excluded, load will be - distributed amongst the remaining healthy priorities based on the relative - health of the priorities, matching how load is distributed during regular - host selection. For example, given priority healths of {100, 50, 50}, the - original load will be {100, 0, 0} (since P0 has capacity to handle 100% of - the traffic). If P0 is excluded, the load changes to {0, 50, 50}, because - P1 is only able to handle 50% of the traffic, causing the remaining to - spill over to P2. Each priority attempted will be excluded until there are - no healthy priorities left, at which point the list of attempted priorities - will be reset, essentially starting from the beginning. For example, given - three priorities P0, P1, P2 with healthy % of 100, 0 and 50 respectively, - the following sequence of priorities would be selected (assuming - update_frequency = 1): Attempt 1: P0 (P0 is 100% healthy) Attempt 2: P2 (P0 - already attempted, P2 only healthy priority) Attempt 3: P0 (no healthy - priorities, reset) Attempt 4: P2 In the case of all upstream hosts being - unhealthy, no adjustments will be made to the original priority load, so - behavior should be identical to not using this plugin. Using this - PriorityFilter requires rebuilding the priority load, which runs in O(# of - priorities), which might incur significant overhead for clusters with many - priorities. [#extension: envoy.retry_priorities.previous_priorities] - """ - - # How often the priority load should be updated based on previously attempted - # priorities. Useful to allow each priorities to receive more than one - # request before being excluded or to reduce the number of times that the - # priority load has to be recomputed. For example, by setting this to 2, then - # the first two attempts (initial attempt and first retry) will use the - # unmodified priority load. The third and fourth attempt will use priority - # load which excludes the priorities routed to with the first two attempts, - # and the fifth and sixth attempt will use the priority load excluding the - # priorities used for the first four attempts. Must be greater than 0. 
- update_frequency: int = betterproto.int32_field(1) diff --git a/src/envoy_data_plane/envoy/config/route/__init__.py b/src/envoy_data_plane/envoy/config/route/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/route/v3/__init__.py b/src/envoy_data_plane/envoy/config/route/v3/__init__.py deleted file mode 100644 index fa8cdf2..0000000 --- a/src/envoy_data_plane/envoy/config/route/v3/__init__.py +++ /dev/null @@ -1,2077 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/route/v3/route.proto, envoy/config/route/v3/route_components.proto, envoy/config/route/v3/scoped_route.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import Dict, List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class VirtualHostTlsRequirementType(betterproto.Enum): - NONE = 0 - EXTERNAL_ONLY = 1 - ALL = 2 - - -class RouteActionClusterNotFoundResponseCode(betterproto.Enum): - SERVICE_UNAVAILABLE = 0 - NOT_FOUND = 1 - - -class RouteActionInternalRedirectAction(betterproto.Enum): - PASS_THROUGH_INTERNAL_REDIRECT = 0 - HANDLE_INTERNAL_REDIRECT = 1 - - -class RetryPolicyResetHeaderFormat(betterproto.Enum): - SECONDS = 0 - UNIX_TIMESTAMP = 1 - - -class RedirectActionRedirectResponseCode(betterproto.Enum): - MOVED_PERMANENTLY = 0 - FOUND = 1 - SEE_OTHER = 2 - TEMPORARY_REDIRECT = 3 - PERMANENT_REDIRECT = 4 - - -class RateLimitActionMetaDataSource(betterproto.Enum): - DYNAMIC = 0 - ROUTE_ENTRY = 1 - - -@dataclass(eq=False, repr=False) -class VirtualHost(betterproto.Message): - """ - The top level element in the routing configuration is a virtual host. Each - virtual host has a logical name as well as a set of domains that get routed - to it based on the incoming request's host header. This allows a single - listener to service multiple top level domain path trees. Once a virtual - host is selected based on the domain, the routes are processed in order to - see which upstream cluster to route to or whether to perform a redirect. - [#next-free-field: 22] - """ - - # The logical name of the virtual host. This is used when emitting certain - # statistics but is not relevant for routing. - name: str = betterproto.string_field(1) - # A list of domains (host/authority header) that will be matched to this - # virtual host. Wildcard hosts are supported in the suffix or prefix form. - # Domain search order: 1. Exact domain names: ``www.foo.com``. 2. Suffix - # domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. 3. Prefix domain - # wildcards: ``foo.*`` or ``foo-*``. 4. Special wildcard ``*`` matching any - # domain. .. note:: The wildcard will not match the empty string. e.g. - # ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. - # The longest wildcards match first. Only a single virtual host in the - # entire route configuration can match on ``*``. A domain must be unique - # across all virtual hosts or the config will fail to load. Domains cannot - # contain control characters. This is validated by the well_known_regex - # HTTP_HEADER_VALUE. - domains: List[str] = betterproto.string_field(2) - # The list of routes that will be matched, in order, for incoming requests. - # The first route that matches will be used. Only one of this and `matcher` - # can be specified. 
- routes: List["Route"] = betterproto.message_field(3) - # [#next-major-version: This should be included in a oneof with routes - # wrapped in a message.] The match tree to use when resolving route actions - # for incoming requests. Only one of this and `routes` can be specified. - matcher: "____xds_type_matcher_v3__.Matcher" = betterproto.message_field(21) - # Specifies the type of TLS enforcement the virtual host expects. If this - # option is not specified, there is no TLS requirement for the virtual host. - require_tls: "VirtualHostTlsRequirementType" = betterproto.enum_field(4) - # A list of virtual clusters defined for this virtual host. Virtual clusters - # are used for additional statistics gathering. - virtual_clusters: List["VirtualCluster"] = betterproto.message_field(5) - # Specifies a set of rate limit configurations that will be applied to the - # virtual host. - rate_limits: List["RateLimit"] = betterproto.message_field(6) - # Specifies a list of HTTP headers that should be added to each request - # handled by this virtual host. Headers specified at this level are applied - # after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` - # and before headers from the enclosing - # :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more - # information, including details on header value syntax, see the - # documentation on :ref:`custom request headers - # `. - request_headers_to_add: List[ - "__core_v3__.HeaderValueOption" - ] = betterproto.message_field(7) - # Specifies a list of HTTP headers that should be removed from each request - # handled by this virtual host. - request_headers_to_remove: List[str] = betterproto.string_field(13) - # Specifies a list of HTTP headers that should be added to each response - # handled by this virtual host. Headers specified at this level are applied - # after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` - # and before headers from the enclosing - # :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more - # information, including details on header value syntax, see the - # documentation on :ref:`custom request headers - # `. - response_headers_to_add: List[ - "__core_v3__.HeaderValueOption" - ] = betterproto.message_field(10) - # Specifies a list of HTTP headers that should be removed from each response - # handled by this virtual host. - response_headers_to_remove: List[str] = betterproto.string_field(11) - # Indicates that the virtual host has a CORS policy. - cors: "CorsPolicy" = betterproto.message_field(8) - # The per_filter_config field can be used to provide virtual host-specific - # configurations for filters. The key should match the filter name, such as - # *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field - # is filter specific; see the :ref:`HTTP filter documentation - # ` for if and how it is utilized. [#comment: An entry's - # value may be wrapped in a - # :ref:`FilterConfig` message - # to specify additional options.] - typed_per_filter_config: Dict[ - str, "betterproto_lib_google_protobuf.Any" - ] = betterproto.map_field(15, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # Decides whether the :ref:`x-envoy-attempt-count - # ` header should be - # included in the upstream request. Setting this option will cause it to - # override any existing header value, so in the case of two Envoys on the - # request path with this option enabled, the upstream will see the attempt - # count as perceived by the second Envoy. Defaults to false. 
This header is - # unaffected by the :ref:`suppress_envoy_headers ` flag. [#next- - # major-version: rename to include_attempt_count_in_request.] - include_request_attempt_count: bool = betterproto.bool_field(14) - # Decides whether the :ref:`x-envoy-attempt-count - # ` header should be - # included in the downstream response. Setting this option will cause the - # router to override any existing header value, so in the case of two Envoys - # on the request path with this option enabled, the downstream will see the - # attempt count as perceived by the Envoy closest upstream from itself. - # Defaults to false. This header is unaffected by the - # :ref:`suppress_envoy_headers ` flag. - include_attempt_count_in_response: bool = betterproto.bool_field(19) - # Indicates the retry policy for all routes in this virtual host. Note that - # setting a route level entry will take precedence over this config and it'll - # be treated independently (e.g.: values are not inherited). - retry_policy: "RetryPolicy" = betterproto.message_field(16) - # [#not-implemented-hide:] Specifies the configuration for retry policy - # extension. Note that setting a route level entry will take precedence over - # this config and it'll be treated independently (e.g.: values are not - # inherited). :ref:`Retry policy - # ` should not - # be set if this field is used. - retry_policy_typed_config: "betterproto_lib_google_protobuf.Any" = ( - betterproto.message_field(20) - ) - # Indicates the hedge policy for all routes in this virtual host. Note that - # setting a route level entry will take precedence over this config and it'll - # be treated independently (e.g.: values are not inherited). - hedge_policy: "HedgePolicy" = betterproto.message_field(17) - # The maximum bytes which will be buffered for retries and shadowing. If set - # and a route-specific limit is not set, the bytes actually buffered will be - # the minimum value of this and the listener - # per_connection_buffer_limit_bytes. - per_request_buffer_limit_bytes: Optional[int] = betterproto.message_field( - 18, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class FilterAction(betterproto.Message): - """A filter-defined action type.""" - - action: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class Route(betterproto.Message): - """ - A route is both a specification of how to match a request as well as an - indication of what to do next (e.g., redirect, forward, rewrite, etc.). .. - attention:: Envoy supports routing on HTTP method via :ref:`header - matching `. [#next-free- - field: 19] - """ - - # Name for the route. - name: str = betterproto.string_field(14) - # Route matching parameters. - match: "RouteMatch" = betterproto.message_field(1) - # Route request to some upstream cluster. - route: "RouteAction" = betterproto.message_field(2, group="action") - # Return a redirect. - redirect: "RedirectAction" = betterproto.message_field(3, group="action") - # Return an arbitrary HTTP response directly, without proxying. - direct_response: "DirectResponseAction" = betterproto.message_field( - 7, group="action" - ) - # [#not-implemented-hide:] A filter-defined action (e.g., it could - # dynamically generate the RouteAction). 
[#comment: TODO(samflattery): Remove - # cleanup in route_fuzz_test.cc when implemented] - filter_action: "FilterAction" = betterproto.message_field(17, group="action") - # [#not-implemented-hide:] An action used when the route will generate a - # response directly, without forwarding to an upstream host. This will be - # used in non-proxy xDS clients like the gRPC server. It could also be used - # in the future in Envoy for a filter that directly generates responses for - # requests. - non_forwarding_action: "NonForwardingAction" = betterproto.message_field( - 18, group="action" - ) - # The Metadata field can be used to provide additional information about the - # route. It can be used for configuration, stats, and logging. The metadata - # should go under the filter namespace that will need it. For instance, if - # the metadata is intended for the Router filter, the filter name should be - # specified as *envoy.filters.http.router*. - metadata: "__core_v3__.Metadata" = betterproto.message_field(4) - # Decorator for the matched route. - decorator: "Decorator" = betterproto.message_field(5) - # The typed_per_filter_config field can be used to provide route-specific - # configurations for filters. The key should match the filter name, such as - # *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field - # is filter specific; see the :ref:`HTTP filter documentation - # ` for if and how it is utilized. [#comment: An entry's - # value may be wrapped in a - # :ref:`FilterConfig` message - # to specify additional options.] - typed_per_filter_config: Dict[ - str, "betterproto_lib_google_protobuf.Any" - ] = betterproto.map_field(13, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # Specifies a set of headers that will be added to requests matching this - # route. Headers specified at this level are applied before headers from the - # enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and - # :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more - # information, including details on header value syntax, see the - # documentation on :ref:`custom request headers - # `. - request_headers_to_add: List[ - "__core_v3__.HeaderValueOption" - ] = betterproto.message_field(9) - # Specifies a list of HTTP headers that should be removed from each request - # matching this route. - request_headers_to_remove: List[str] = betterproto.string_field(12) - # Specifies a set of headers that will be added to responses to requests - # matching this route. Headers specified at this level are applied before - # headers from the enclosing - # :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and - # :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more - # information, including details on header value syntax, see the - # documentation on :ref:`custom request headers - # `. - response_headers_to_add: List[ - "__core_v3__.HeaderValueOption" - ] = betterproto.message_field(10) - # Specifies a list of HTTP headers that should be removed from each response - # to requests matching this route. - response_headers_to_remove: List[str] = betterproto.string_field(11) - # Presence of the object defines whether the connection manager's tracing - # configuration is overridden by this route specific instance. - tracing: "Tracing" = betterproto.message_field(15) - # The maximum bytes which will be buffered for retries and shadowing. If set, - # the bytes actually buffered will be the minimum value of this and the - # listener per_connection_buffer_limit_bytes. 
- per_request_buffer_limit_bytes: Optional[int] = betterproto.message_field( - 16, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class WeightedCluster(betterproto.Message): - """ - Compared to the :ref:`cluster - ` field that - specifies a single upstream cluster as the target of a request, the - :ref:`weighted_clusters - ` option - allows for specification of multiple upstream clusters along with weights - that indicate the percentage of traffic to be forwarded to each cluster. - The router selects an upstream cluster based on the weights. - """ - - # Specifies one or more upstream clusters associated with the route. - clusters: List["WeightedClusterClusterWeight"] = betterproto.message_field(1) - # Specifies the total weight across all clusters. The sum of all cluster - # weights must equal this value, which must be greater than 0. Defaults to - # 100. - total_weight: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - # Specifies the runtime key prefix that should be used to construct the - # runtime keys associated with each cluster. When the *runtime_key_prefix* is - # specified, the router will look for weights associated with each upstream - # cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where - # *cluster[i]* denotes an entry in the clusters array field. If the runtime - # key for the cluster does not exist, the value specified in the - # configuration file will be used as the default weight. See the - # :ref:`runtime documentation ` for how key names map to - # the underlying implementation. - runtime_key_prefix: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class WeightedClusterClusterWeight(betterproto.Message): - """[#next-free-field: 13]""" - - # Only one of *name* and *cluster_header* may be specified. [#next-major- - # version: Need to add back the validation rule: (validate.rules).string = - # {min_len: 1}] Name of the upstream cluster. The cluster must exist in the - # :ref:`cluster manager configuration `. - name: str = betterproto.string_field(1) - # Only one of *name* and *cluster_header* may be specified. [#next-major- - # version: Need to add back the validation rule: (validate.rules).string = - # {min_len: 1 }] Envoy will determine the cluster to route to by reading the - # value of the HTTP header named by cluster_header from the request headers. - # If the header is not found or the referenced cluster does not exist, Envoy - # will return a 404 response. .. attention:: Internally, Envoy always uses - # the HTTP/2 *:authority* header to represent the HTTP/1 *Host* header. - # Thus, if attempting to match on *Host*, match on *:authority* instead. .. - # note:: If the header appears multiple times only the first value is used. - cluster_header: str = betterproto.string_field(12) - # An integer between 0 and :ref:`total_weight - # `. When a - # request matches the route, the choice of an upstream cluster is determined - # by its weight. The sum of weights across all entries in the clusters array - # must add up to the total_weight, which defaults to 100. - weight: Optional[int] = betterproto.message_field(2, wraps=betterproto.TYPE_UINT32) - # Optional endpoint metadata match criteria used by the subset load balancer. - # Only endpoints in the upstream cluster with metadata matching what is set - # in this field will be considered for load balancing. 
Note that this will be - # merged with what's provided in :ref:`RouteAction.metadata_match - # `, with - # values here taking precedence. The filter name should be specified as - # *envoy.lb*. - metadata_match: "__core_v3__.Metadata" = betterproto.message_field(3) - # Specifies a list of headers to be added to requests when this cluster is - # selected through the enclosing - # :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. Headers specified at - # this level are applied before headers from the enclosing - # :ref:`envoy_v3_api_msg_config.route.v3.Route`, - # :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and - # :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more - # information, including details on header value syntax, see the - # documentation on :ref:`custom request headers - # `. - request_headers_to_add: List[ - "__core_v3__.HeaderValueOption" - ] = betterproto.message_field(4) - # Specifies a list of HTTP headers that should be removed from each request - # when this cluster is selected through the enclosing - # :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. - request_headers_to_remove: List[str] = betterproto.string_field(9) - # Specifies a list of headers to be added to responses when this cluster is - # selected through the enclosing - # :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. Headers specified at - # this level are applied before headers from the enclosing - # :ref:`envoy_v3_api_msg_config.route.v3.Route`, - # :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and - # :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more - # information, including details on header value syntax, see the - # documentation on :ref:`custom request headers - # `. - response_headers_to_add: List[ - "__core_v3__.HeaderValueOption" - ] = betterproto.message_field(5) - # Specifies a list of headers to be removed from responses when this cluster - # is selected through the enclosing - # :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. - response_headers_to_remove: List[str] = betterproto.string_field(6) - # The per_filter_config field can be used to provide weighted cluster- - # specific configurations for filters. The key should match the filter name, - # such as *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this - # field is filter specific; see the :ref:`HTTP filter documentation - # ` for if and how it is utilized. [#comment: An entry's - # value may be wrapped in a - # :ref:`FilterConfig` message - # to specify additional options.] - typed_per_filter_config: Dict[ - str, "betterproto_lib_google_protobuf.Any" - ] = betterproto.map_field(10, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # Indicates that during forwarding, the host header will be swapped with this - # value. - host_rewrite_literal: str = betterproto.string_field( - 11, group="host_rewrite_specifier" - ) - - -@dataclass(eq=False, repr=False) -class RouteMatch(betterproto.Message): - """[#next-free-field: 14]""" - - # If specified, the route is a prefix rule meaning that the prefix must match - # the beginning of the *:path* header. - prefix: str = betterproto.string_field(1, group="path_specifier") - # If specified, the route is an exact path rule meaning that the path must - # exactly match the *:path* header once the query string is removed. 
- path: str = betterproto.string_field(2, group="path_specifier") - # If specified, the route is a regular expression rule meaning that the regex - # must match the *:path* header once the query string is removed. The entire - # path (without the query string) must match the regex. The rule will not - # match if only a subsequence of the *:path* header matches the regex. - # [#next-major-version: In the v3 API we should redo how path specification - # works such that we utilize StringMatcher, and additionally have consistent - # options around whether we strip query strings, do a case sensitive match, - # etc. In the interim it will be too disruptive to deprecate the existing - # options. We should even consider whether we want to do away with - # path_specifier entirely and just rely on a set of header matchers which can - # already match on :path, etc. The issue with that is it is unclear how to - # generically deal with query string stripping. This needs more thought.] - safe_regex: "___type_matcher_v3__.RegexMatcher" = betterproto.message_field( - 10, group="path_specifier" - ) - # If this is used as the matcher, the matcher will only match CONNECT - # requests. Note that this will not match HTTP/2 upgrade-style CONNECT - # requests (WebSocket and the like) as they are normalized in Envoy as - # HTTP/1.1 style upgrades. This is the only way to match CONNECT requests for - # HTTP/1.1. For HTTP/2, where Extended CONNECT requests may have a path, the - # path matchers will work if there is a path present. Note that CONNECT - # support is currently considered alpha in Envoy. [#comment: TODO(htuch): - # Replace the above comment with an alpha tag.] - connect_matcher: "RouteMatchConnectMatcher" = betterproto.message_field( - 12, group="path_specifier" - ) - # Indicates that prefix/path matching should be case sensitive. The default - # is true. Ignored for safe_regex matching. - case_sensitive: Optional[bool] = betterproto.message_field( - 4, wraps=betterproto.TYPE_BOOL - ) - # Indicates that the route should additionally match on a runtime key. Every - # time the route is considered for a match, it must also fall under the - # percentage of matches indicated by this field. For some fraction N/D, a - # random number in the range [0,D) is selected. If the number is <= the value - # of the numerator N, or if the key is not present, the default value, the - # router continues to evaluate the remaining match criteria. A - # runtime_fraction route configuration can be used to roll out route changes - # in a gradual manner without full code/config deploys. Refer to the - # :ref:`traffic shifting - # ` docs for - # additional documentation. .. note:: Parsing this field is implemented - # such that the runtime key's data may be represented as a - # FractionalPercent proto represented as JSON/YAML and may also be - # represented as an integer with the assumption that the value is an - # integral percentage out of 100. For instance, a runtime key lookup - # returning the value "42" would parse as a FractionalPercent whose - # numerator is 42 and denominator is HUNDRED. This preserves legacy - # semantics. - runtime_fraction: "__core_v3__.RuntimeFractionalPercent" = ( - betterproto.message_field(9) - ) - # Specifies a set of headers that the route should match on. The router will - # check the request’s headers against all the specified headers in the route - # config. 
A match will happen if all the headers in the route are present in - # the request with the same values (or based on presence if the value field - # is not in the config). - headers: List["HeaderMatcher"] = betterproto.message_field(6) - # Specifies a set of URL query parameters on which the route should match. - # The router will check the query string from the *path* header against all - # the specified query parameters. If the number of specified query parameters - # is nonzero, they all must match the *path* header's query string for a - # match to occur. .. note:: If query parameters are used to pass request - # message fields when `grpc_json_transcoder - # `_ is used, the transcoded message fields maybe different. The query - # parameters are url encoded, but the message fields are not. For example, - # if a query parameter is "foo%20bar", the message field will be "foo - # bar". - query_parameters: List["QueryParameterMatcher"] = betterproto.message_field(7) - # If specified, only gRPC requests will be matched. The router will check - # that the content-type header has a application/grpc or one of the various - # application/grpc+ values. - grpc: "RouteMatchGrpcRouteMatchOptions" = betterproto.message_field(8) - # If specified, the client tls context will be matched against the defined - # match options. [#next-major-version: unify with RBAC] - tls_context: "RouteMatchTlsContextMatchOptions" = betterproto.message_field(11) - # Specifies a set of dynamic metadata matchers on which the route should - # match. The router will check the dynamic metadata against all the specified - # dynamic metadata matchers. If the number of specified dynamic metadata - # matchers is nonzero, they all must match the dynamic metadata for a match - # to occur. - dynamic_metadata: List[ - "___type_matcher_v3__.MetadataMatcher" - ] = betterproto.message_field(13) - - -@dataclass(eq=False, repr=False) -class RouteMatchGrpcRouteMatchOptions(betterproto.Message): - pass - - -@dataclass(eq=False, repr=False) -class RouteMatchTlsContextMatchOptions(betterproto.Message): - # If specified, the route will match against whether or not a certificate is - # presented. If not specified, certificate presentation status (true or - # false) will not be considered when route matching. - presented: Optional[bool] = betterproto.message_field( - 1, wraps=betterproto.TYPE_BOOL - ) - # If specified, the route will match against whether or not a certificate is - # validated. If not specified, certificate validation status (true or false) - # will not be considered when route matching. - validated: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) - - -@dataclass(eq=False, repr=False) -class RouteMatchConnectMatcher(betterproto.Message): - """An extensible message for matching CONNECT requests.""" - - pass - - -@dataclass(eq=False, repr=False) -class CorsPolicy(betterproto.Message): - """[#next-free-field: 12]""" - - # Specifies string patterns that match allowed origins. An origin is allowed - # if any of the string matchers match. - allow_origin_string_match: List[ - "___type_matcher_v3__.StringMatcher" - ] = betterproto.message_field(11) - # Specifies the content for the *access-control-allow-methods* header. - allow_methods: str = betterproto.string_field(2) - # Specifies the content for the *access-control-allow-headers* header. - allow_headers: str = betterproto.string_field(3) - # Specifies the content for the *access-control-expose-headers* header. 
- expose_headers: str = betterproto.string_field(4) - # Specifies the content for the *access-control-max-age* header. - max_age: str = betterproto.string_field(5) - # Specifies whether the resource allows credentials. - allow_credentials: Optional[bool] = betterproto.message_field( - 6, wraps=betterproto.TYPE_BOOL - ) - # Specifies the % of requests for which the CORS filter is enabled. If - # neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are - # specified, the CORS filter will be enabled for 100% of the requests. If - # :ref:`runtime_key - # ` - # is specified, Envoy will lookup the runtime key to get the percentage of - # requests to filter. - filter_enabled: "__core_v3__.RuntimeFractionalPercent" = betterproto.message_field( - 9, group="enabled_specifier" - ) - # Specifies the % of requests for which the CORS policies will be evaluated - # and tracked, but not enforced. This field is intended to be used when - # ``filter_enabled`` and ``enabled`` are off. One of those fields have to - # explicitly disable the filter in order for this setting to take effect. If - # :ref:`runtime_key - # ` - # is specified, Envoy will lookup the runtime key to get the percentage of - # requests for which it will evaluate and track the request's *Origin* to - # determine if it's valid but will not enforce any policies. - shadow_enabled: "__core_v3__.RuntimeFractionalPercent" = betterproto.message_field( - 10 - ) - - -@dataclass(eq=False, repr=False) -class RouteAction(betterproto.Message): - """[#next-free-field: 39]""" - - # Indicates the upstream cluster to which the request should be routed to. - cluster: str = betterproto.string_field(1, group="cluster_specifier") - # Envoy will determine the cluster to route to by reading the value of the - # HTTP header named by cluster_header from the request headers. If the header - # is not found or the referenced cluster does not exist, Envoy will return a - # 404 response. .. attention:: Internally, Envoy always uses the HTTP/2 - # *:authority* header to represent the HTTP/1 *Host* header. Thus, if - # attempting to match on *Host*, match on *:authority* instead. .. note:: - # If the header appears multiple times only the first value is used. - cluster_header: str = betterproto.string_field(2, group="cluster_specifier") - # Multiple upstream clusters can be specified for a given route. The request - # is routed to one of the upstream clusters based on weights assigned to each - # cluster. See :ref:`traffic splitting - # ` for additional - # documentation. - weighted_clusters: "WeightedCluster" = betterproto.message_field( - 3, group="cluster_specifier" - ) - # [#not-implemented-hide:] Name of the cluster specifier plugin to use to - # determine the cluster for requests on this route. The plugin name must be - # defined in the associated :ref:`envoy_v3_api_field_config.route.v3.RouteCon - # figuration.cluster_specifier_plugins` in the - # :ref:`envoy_v3_api_field_config.core.v3.TypedExtensionConfig.name` field. - cluster_specifier_plugin: str = betterproto.string_field( - 37, group="cluster_specifier" - ) - # The HTTP status code to use when configured cluster is not found. The - # default response code is 503 Service Unavailable. - cluster_not_found_response_code: "RouteActionClusterNotFoundResponseCode" = ( - betterproto.enum_field(20) - ) - # Optional endpoint metadata match criteria used by the subset load balancer. 
- # Only endpoints in the upstream cluster with metadata matching what's set in - # this field will be considered for load balancing. If using - # :ref:`weighted_clusters - # `, - # metadata will be merged, with values provided there taking precedence. The - # filter name should be specified as *envoy.lb*. - metadata_match: "__core_v3__.Metadata" = betterproto.message_field(4) - # Indicates that during forwarding, the matched prefix (or path) should be - # swapped with this value. This option allows application URLs to be rooted - # at a different path from those exposed at the reverse proxy layer. The - # router filter will place the original path before rewrite into the - # :ref:`x-envoy-original-path ` header. Only one of *prefix_rewrite* or :ref:`regex_rewrite - # ` may be - # specified. .. attention:: Pay careful attention to the use of trailing - # slashes in the :ref:`route's match - # ` prefix value. Stripping - # a prefix from a path requires multiple Routes to handle all cases. For - # example, rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be - # done in a single :ref:`Route `, - # as shown by the below config entries: .. code-block:: yaml - match: - # prefix: "/prefix/" route: prefix_rewrite: "/" - match: - # prefix: "/prefix" route: prefix_rewrite: "/" Having above - # entries in the config, requests to */prefix* will be stripped to */*, while - # requests to */prefix/etc* will be stripped to */etc*. - prefix_rewrite: str = betterproto.string_field(5) - # Indicates that during forwarding, portions of the path that match the - # pattern should be rewritten, even allowing the substitution of capture - # groups from the pattern into the new path as specified by the rewrite - # substitution string. This is useful to allow application paths to be - # rewritten in a way that is aware of segments with variable content like - # identifiers. The router filter will place the original path as it was - # before the rewrite into the :ref:`x-envoy-original-path - # ` header. Only one of - # :ref:`prefix_rewrite - # ` or - # *regex_rewrite* may be specified. Examples using Google's `RE2 - # `_ engine: * The path pattern - # ``^/service/([^/]+)(/.*)$`` paired with a substitution string of - # ``\2/instance/\1`` would transform ``/service/foo/v1/api`` into - # ``/v1/api/instance/foo``. * The pattern ``one`` paired with a substitution - # string of ``two`` would transform ``/xxx/one/yyy/one/zzz`` into - # ``/xxx/two/yyy/two/zzz``. * The pattern ``^(.*?)one(.*)$`` paired with a - # substitution string of ``\1two\2`` would replace only the first - # occurrence of ``one``, transforming path ``/xxx/one/yyy/one/zzz`` into - # ``/xxx/two/yyy/one/zzz``. * The pattern ``(?i)/xxx/`` paired with a - # substitution string of ``/yyy/`` would do a case-insensitive match and - # transform path ``/aaa/XxX/bbb`` to ``/aaa/yyy/bbb``. - regex_rewrite: "___type_matcher_v3__.RegexMatchAndSubstitute" = ( - betterproto.message_field(32) - ) - # Indicates that during forwarding, the host header will be swapped with this - # value. Using this option will append the - # :ref:`config_http_conn_man_headers_x-forwarded-host` header if - # :ref:`append_x_forwarded_host - # ` - # is set. - host_rewrite_literal: str = betterproto.string_field( - 6, group="host_rewrite_specifier" - ) - # Indicates that during forwarding, the host header will be swapped with the - # hostname of the upstream host chosen by the cluster manager. 
This option is - # applicable only when the destination cluster for a route is of type - # *strict_dns* or *logical_dns*. Setting this to true with other cluster - # types has no effect. Using this option will append the - # :ref:`config_http_conn_man_headers_x-forwarded-host` header if - # :ref:`append_x_forwarded_host - # ` - # is set. - auto_host_rewrite: Optional[bool] = betterproto.message_field( - 7, wraps=betterproto.TYPE_BOOL, group="host_rewrite_specifier" - ) - # Indicates that during forwarding, the host header will be swapped with the - # content of given downstream or :ref:`custom - # ` header. If header - # value is empty, host header is left intact. Using this option will append - # the :ref:`config_http_conn_man_headers_x-forwarded-host` header if - # :ref:`append_x_forwarded_host - # ` - # is set. .. attention:: Pay attention to the potential security - # implications of using this option. Provided header must come from trusted - # source. .. note:: If the header appears multiple times only the first - # value is used. - host_rewrite_header: str = betterproto.string_field( - 29, group="host_rewrite_specifier" - ) - # Indicates that during forwarding, the host header will be swapped with the - # result of the regex substitution executed on path value with query and - # fragment removed. This is useful for transitioning variable content between - # path segment and subdomain. Using this option will append the - # :ref:`config_http_conn_man_headers_x-forwarded-host` header if - # :ref:`append_x_forwarded_host - # ` - # is set. For example with the following config: .. code-block:: yaml - # host_rewrite_path_regex: pattern: google_re2: {} - # regex: "^/(.+)/.+$" substitution: \1 Would rewrite the host header to - # `envoyproxy.io` given the path `/envoyproxy.io/some/path`. - host_rewrite_path_regex: "___type_matcher_v3__.RegexMatchAndSubstitute" = ( - betterproto.message_field(35, group="host_rewrite_specifier") - ) - # If set, then a host rewrite action (one of :ref:`host_rewrite_literal - # `, - # :ref:`auto_host_rewrite - # `, - # :ref:`host_rewrite_header - # `, or - # :ref:`host_rewrite_path_regex - # `) - # causes the original value of the host header, if any, to be appended to the - # :ref:`config_http_conn_man_headers_x-forwarded-host` HTTP header. - append_x_forwarded_host: bool = betterproto.bool_field(38) - # Specifies the upstream timeout for the route. If not specified, the default - # is 15s. This spans between the point at which the entire downstream request - # (i.e. end-of-stream) has been processed and when the upstream response has - # been completely processed. A value of 0 will disable the route's timeout. - # .. note:: This timeout includes all retries. See also - # :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - # :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, - # and the :ref:`retry overview `. - timeout: timedelta = betterproto.message_field(8) - # Specifies the idle timeout for the route. If not specified, there is no - # per-route idle timeout, although the connection manager wide - # :ref:`stream_idle_timeout ` will - # still apply. A value of 0 will completely disable the route's idle timeout, - # even if a connection manager stream idle timeout is configured. The idle - # timeout is distinct to :ref:`timeout - # `, which provides - # an upper bound on the upstream response time; :ref:`idle_timeout - # ` instead - # bounds the amount of time the request's stream may be idle. 
After header - # decoding, the idle timeout will apply on downstream and upstream request - # events. Each time an encode/decode event for headers or data is processed - # for the stream, the timer will be reset. If the timeout fires, the stream - # is terminated with a 408 Request Timeout error code if no upstream response - # header has been received, otherwise a stream reset occurs. If the - # :ref:`overload action ` - # "envoy.overload_actions.reduce_timeouts" is configured, this timeout is - # scaled according to the value for :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. - idle_timeout: timedelta = betterproto.message_field(24) - # Indicates that the route has a retry policy. Note that if this is set, - # it'll take precedence over the virtual host level retry policy entirely - # (e.g.: policies are not merged, most internal one becomes the enforced - # policy). - retry_policy: "RetryPolicy" = betterproto.message_field(9) - # [#not-implemented-hide:] Specifies the configuration for retry policy - # extension. Note that if this is set, it'll take precedence over the virtual - # host level retry policy entirely (e.g.: policies are not merged, most - # internal one becomes the enforced policy). :ref:`Retry policy - # ` should not - # be set if this field is used. - retry_policy_typed_config: "betterproto_lib_google_protobuf.Any" = ( - betterproto.message_field(33) - ) - # Indicates that the route has request mirroring policies. - request_mirror_policies: List[ - "RouteActionRequestMirrorPolicy" - ] = betterproto.message_field(30) - # Optionally specifies the :ref:`routing priority - # `. - priority: "__core_v3__.RoutingPriority" = betterproto.enum_field(11) - # Specifies a set of rate limit configurations that could be applied to the - # route. - rate_limits: List["RateLimit"] = betterproto.message_field(13) - # Specifies if the rate limit filter should include the virtual host rate - # limits. By default, if the route configured rate limits, the virtual host - # :ref:`rate_limits - # ` are not - # applied to the request. This field is deprecated. Please use - # :ref:`vh_rate_limits ` - include_vh_rate_limits: Optional[bool] = betterproto.message_field( - 14, wraps=betterproto.TYPE_BOOL - ) - # Specifies a list of hash policies to use for ring hash load balancing. Each - # hash policy is evaluated individually and the combined result is used to - # route the request. The method of combination is deterministic such that - # identical lists of hash policies will produce the same hash. Since a hash - # policy examines specific parts of a request, it can fail to produce a hash - # (i.e. if the hashed header is not present). If (and only if) all configured - # hash policies fail to generate a hash, no hash will be produced for the - # route. In this case, the behavior is the same as if no hash policies were - # specified (i.e. the ring hash load balancer will choose a random backend). - # If a hash policy has the "terminal" attribute set to true, and there is - # already a hash generated, the hash is returned immediately, ignoring the - # rest of the hash policy list. - hash_policy: List["RouteActionHashPolicy"] = betterproto.message_field(15) - # Indicates that the route has a CORS policy. 
- cors: "CorsPolicy" = betterproto.message_field(17) - # Deprecated by :ref:`grpc_timeout_header_max ` If present, - # and the request is a gRPC request, use the `grpc-timeout header - # `_, or its - # default value (infinity) instead of :ref:`timeout - # `, but limit the - # applied timeout to the maximum value specified here. If configured as 0, - # the maximum allowed timeout for gRPC requests is infinity. If not - # configured at all, the `grpc-timeout` header is not used and gRPC requests - # time out like any other requests using :ref:`timeout - # ` or its default. - # This can be used to prevent unexpected upstream request timeouts due to - # potentially long time gaps between gRPC request and response in gRPC - # streaming mode. .. note:: If a timeout is specified using - # :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes - # precedence over `grpc-timeout header - # `_, when - # both are present. See also :ref:`config_http_filters_router_x-envoy- - # upstream-rq-timeout-ms`, :ref:`config_http_filters_router_x-envoy- - # upstream-rq-per-try-timeout-ms`, and the :ref:`retry overview - # `. - max_grpc_timeout: timedelta = betterproto.message_field(23) - # Deprecated by :ref:`grpc_timeout_header_offset `. If - # present, Envoy will adjust the timeout provided by the `grpc-timeout` - # header by subtracting the provided duration from the header. This is useful - # in allowing Envoy to set its global timeout to be less than that of the - # deadline imposed by the calling client, which makes it more likely that - # Envoy will handle the timeout instead of having the call canceled by the - # client. The offset will only be applied if the provided grpc_timeout is - # greater than the offset. This ensures that the offset will only ever - # decrease the timeout and never set it to 0 (meaning infinity). - grpc_timeout_offset: timedelta = betterproto.message_field(28) - upgrade_configs: List["RouteActionUpgradeConfig"] = betterproto.message_field(25) - # If present, Envoy will try to follow an upstream redirect response instead - # of proxying the response back to the downstream. An upstream redirect - # response is defined by :ref:`redirect_response_codes `. - internal_redirect_policy: "InternalRedirectPolicy" = betterproto.message_field(34) - internal_redirect_action: "RouteActionInternalRedirectAction" = ( - betterproto.enum_field(26) - ) - # An internal redirect is handled, iff the number of previous internal - # redirects that a downstream request has encountered is lower than this - # value, and :ref:`internal_redirect_action - # ` - # is set to :ref:`HANDLE_INTERNAL_REDIRECT ` In the - # case where a downstream request is bounced among multiple routes by - # internal redirect, the first route that hits this threshold, or has - # :ref:`internal_redirect_action - # ` - # set to :ref:`PASS_THROUGH_INTERNAL_REDIRECT ` will pass the redirect back to downstream. If not specified, at most one - # redirect will be followed. - max_internal_redirects: Optional[int] = betterproto.message_field( - 31, wraps=betterproto.TYPE_UINT32 - ) - # Indicates that the route has a hedge policy. Note that if this is set, - # it'll take precedence over the virtual host level hedge policy entirely - # (e.g.: policies are not merged, most internal one becomes the enforced - # policy). - hedge_policy: "HedgePolicy" = betterproto.message_field(27) - # Specifies the maximum stream duration for this route. 
- max_stream_duration: "RouteActionMaxStreamDuration" = betterproto.message_field(36) - - def __post_init__(self) -> None: - super().__post_init__() - if self.include_vh_rate_limits: - warnings.warn( - "RouteAction.include_vh_rate_limits is deprecated", DeprecationWarning - ) - if self.max_grpc_timeout: - warnings.warn( - "RouteAction.max_grpc_timeout is deprecated", DeprecationWarning - ) - if self.grpc_timeout_offset: - warnings.warn( - "RouteAction.grpc_timeout_offset is deprecated", DeprecationWarning - ) - if self.internal_redirect_action: - warnings.warn( - "RouteAction.internal_redirect_action is deprecated", DeprecationWarning - ) - if self.max_internal_redirects: - warnings.warn( - "RouteAction.max_internal_redirects is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class RouteActionRequestMirrorPolicy(betterproto.Message): - """ - The router is capable of shadowing traffic from one cluster to another. The - current implementation is "fire and forget," meaning Envoy will not wait - for the shadow cluster to respond before returning the response from the - primary cluster. All normal statistics are collected for the shadow cluster - making this feature useful for testing. During shadowing, the - host/authority header is altered such that *-shadow* is appended. This is - useful for logging. For example, *cluster1* becomes *cluster1-shadow*. .. - note:: Shadowing will not be triggered if the primary cluster does not - exist. - """ - - # Specifies the cluster that requests will be mirrored to. The cluster must - # exist in the cluster manager configuration. - cluster: str = betterproto.string_field(1) - # If not specified, all requests to the target cluster will be mirrored. If - # specified, this field takes precedence over the `runtime_key` field and - # requests must also fall under the percentage of matches indicated by this - # field. For some fraction N/D, a random number in the range [0,D) is - # selected. If the number is <= the value of the numerator N, or if the key - # is not present, the default value, the request will be mirrored. - runtime_fraction: "__core_v3__.RuntimeFractionalPercent" = ( - betterproto.message_field(3) - ) - # Determines if the trace span should be sampled. Defaults to true. - trace_sampled: Optional[bool] = betterproto.message_field( - 4, wraps=betterproto.TYPE_BOOL - ) - - -@dataclass(eq=False, repr=False) -class RouteActionHashPolicy(betterproto.Message): - """ - Specifies the route's hashing policy if the upstream cluster uses a hashing - :ref:`load balancer `. [#next-free- - field: 7] - """ - - # Header hash policy. - header: "RouteActionHashPolicyHeader" = betterproto.message_field( - 1, group="policy_specifier" - ) - # Cookie hash policy. - cookie: "RouteActionHashPolicyCookie" = betterproto.message_field( - 2, group="policy_specifier" - ) - # Connection properties hash policy. - connection_properties: "RouteActionHashPolicyConnectionProperties" = ( - betterproto.message_field(3, group="policy_specifier") - ) - # Query parameter hash policy. - query_parameter: "RouteActionHashPolicyQueryParameter" = betterproto.message_field( - 5, group="policy_specifier" - ) - # Filter state hash policy. - filter_state: "RouteActionHashPolicyFilterState" = betterproto.message_field( - 6, group="policy_specifier" - ) - # The flag that short-circuits the hash computing. 
This field provides a - # 'fallback' style of configuration: "if a terminal policy doesn't work, - # fallback to rest of the policy list", it saves time when the terminal - # policy works. If true, and there is already a hash computed, ignore rest of - # the list of hash polices. For example, if the following hash methods are - # configured: ========= ======== specifier terminal ========= ======== - # Header A true Header B false Header C false ========= ======== The - # generateHash process ends if policy "header A" generates a hash, as it's a - # terminal policy. - terminal: bool = betterproto.bool_field(4) - - -@dataclass(eq=False, repr=False) -class RouteActionHashPolicyHeader(betterproto.Message): - # The name of the request header that will be used to obtain the hash key. If - # the request header is not present, no hash will be produced. - header_name: str = betterproto.string_field(1) - # If specified, the request header value will be rewritten and used to - # produce the hash key. - regex_rewrite: "___type_matcher_v3__.RegexMatchAndSubstitute" = ( - betterproto.message_field(2) - ) - - -@dataclass(eq=False, repr=False) -class RouteActionHashPolicyCookie(betterproto.Message): - """ - Envoy supports two types of cookie affinity: 1. Passive. Envoy takes a - cookie that's present in the cookies header and hashes on its value. 2. - Generated. Envoy generates and sets a cookie with an expiration (TTL) on - the first request from the client in its response to the client, based - on the endpoint the request gets sent to. The client then presents this - on the next and all subsequent requests. The hash of this is sufficient - to ensure these requests get sent to the same endpoint. The cookie is - generated by hashing the source and destination ports and addresses so - that multiple independent HTTP2 streams on the same connection will - independently receive the same cookie, even if they arrive at the Envoy - simultaneously. - """ - - # The name of the cookie that will be used to obtain the hash key. If the - # cookie is not present and ttl below is not set, no hash will be produced. - name: str = betterproto.string_field(1) - # If specified, a cookie with the TTL will be generated if the cookie is not - # present. If the TTL is present and zero, the generated cookie will be a - # session cookie. - ttl: timedelta = betterproto.message_field(2) - # The name of the path for the cookie. If no path is specified here, no path - # will be set for the cookie. - path: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class RouteActionHashPolicyConnectionProperties(betterproto.Message): - # Hash on source IP address. - source_ip: bool = betterproto.bool_field(1) - - -@dataclass(eq=False, repr=False) -class RouteActionHashPolicyQueryParameter(betterproto.Message): - # The name of the URL query parameter that will be used to obtain the hash - # key. If the parameter is not present, no hash will be produced. Query - # parameter names are case-sensitive. - name: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class RouteActionHashPolicyFilterState(betterproto.Message): - # The name of the Object in the per-request filterState, which is an - # Envoy::Hashable object. If there is no data associated with the key, or the - # stored object is not Envoy::Hashable, no hash will be produced. 
- key: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class RouteActionUpgradeConfig(betterproto.Message): - """ - Allows enabling and disabling upgrades on a per-route basis. This overrides - any enabled/disabled upgrade filter chain specified in the - HttpConnectionManager :ref:`upgrade_configs ` but does not affect any custom filter chain specified there. - """ - - # The case-insensitive name of this upgrade, e.g. "websocket". For each - # upgrade type present in upgrade_configs, requests with Upgrade: - # [upgrade_type] will be proxied upstream. - upgrade_type: str = betterproto.string_field(1) - # Determines if upgrades are available on this route. Defaults to true. - enabled: Optional[bool] = betterproto.message_field(2, wraps=betterproto.TYPE_BOOL) - # Configuration for sending data upstream as a raw data payload. This is used - # for CONNECT requests, when forwarding CONNECT payload as raw TCP. Note that - # CONNECT support is currently considered alpha in Envoy. [#comment: - # TODO(htuch): Replace the above comment with an alpha tag.] - connect_config: "RouteActionUpgradeConfigConnectConfig" = betterproto.message_field( - 3 - ) - - -@dataclass(eq=False, repr=False) -class RouteActionUpgradeConfigConnectConfig(betterproto.Message): - """ - Configuration for sending data upstream as a raw data payload. This is used - for CONNECT or POST requests, when forwarding request payload as raw TCP. - """ - - # If present, the proxy protocol header will be prepended to the CONNECT - # payload sent upstream. - proxy_protocol_config: "__core_v3__.ProxyProtocolConfig" = ( - betterproto.message_field(1) - ) - # If set, the route will also allow forwarding POST payload as raw TCP. - allow_post: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class RouteActionMaxStreamDuration(betterproto.Message): - # Specifies the maximum duration allowed for streams on the route. If not - # specified, the value from the :ref:`max_stream_duration ` field in - # :ref:`HttpConnectionManager.common_http_protocol_options ` is used. If this field is set explicitly - # to zero, any HttpConnectionManager max_stream_duration timeout will be - # disabled for this route. - max_stream_duration: timedelta = betterproto.message_field(1) - # If present, and the request contains a `grpc-timeout header - # `_, use - # that value as the *max_stream_duration*, but limit the applied timeout to - # the maximum value specified here. If set to 0, the `grpc-timeout` header is - # used without modification. - grpc_timeout_header_max: timedelta = betterproto.message_field(2) - # If present, Envoy will adjust the timeout provided by the `grpc-timeout` - # header by subtracting the provided duration from the header. This is useful - # for allowing Envoy to set its global timeout to be less than that of the - # deadline imposed by the calling client, which makes it more likely that - # Envoy will handle the timeout instead of having the call canceled by the - # client. If, after applying the offset, the resulting timeout is zero or - # negative, the stream will timeout immediately. - grpc_timeout_header_offset: timedelta = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class RetryPolicy(betterproto.Message): - """ - HTTP retry :ref:`architecture overview `. - [#next-free-field: 14] - """ - - # Specifies the conditions under which retry takes place. 
These are the same - # conditions documented for :ref:`config_http_filters_router_x-envoy-retry- - # on` and :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. - retry_on: str = betterproto.string_field(1) - # Specifies the allowed number of retries. This parameter is optional and - # defaults to 1. These are the same conditions documented for - # :ref:`config_http_filters_router_x-envoy-max-retries`. - num_retries: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # Specifies a non-zero upstream timeout per retry attempt (including the - # initial attempt). This parameter is optional. The same conditions - # documented for :ref:`config_http_filters_router_x-envoy-upstream-rq-per- - # try-timeout-ms` apply. .. note:: If left unspecified, Envoy will use the - # global :ref:`route timeout - # ` for the request. - # Consequently, when using a :ref:`5xx ` based retry policy, a request that times out will not be - # retried as the total timeout budget would have been exhausted. - per_try_timeout: timedelta = betterproto.message_field(3) - # Specifies an upstream idle timeout per retry attempt (including the initial - # attempt). This parameter is optional and if absent there is no per try idle - # timeout. The semantics of the per try idle timeout are similar to the - # :ref:`route idle timeout - # ` and :ref:`stream - # idle timeout ` both enforced by - # the HTTP connection manager. The difference is that this idle timeout is - # enforced by the router for each individual attempt and thus after all - # previous filters have run, as opposed to *before* all previous filters run - # for the other idle timeouts. This timeout is useful in cases in which total - # request timeout is bounded by a number of retries and a - # :ref:`per_try_timeout - # `, but - # there is a desire to ensure each try is making incremental progress. Note - # also that similar to :ref:`per_try_timeout - # `, this - # idle timeout does not start until after both the entire request has been - # received by the router *and* a connection pool connection has been - # obtained. Unlike :ref:`per_try_timeout - # `, the idle - # timer continues once the response starts streaming back to the downstream - # client. This ensures that response data continues to make progress without - # using one of the HTTP connection manager idle timeouts. - per_try_idle_timeout: timedelta = betterproto.message_field(13) - # Specifies an implementation of a RetryPriority which is used to determine - # the distribution of load across priorities used for retries. Refer to - # :ref:`retry plugin configuration ` for - # more details. - retry_priority: "RetryPolicyRetryPriority" = betterproto.message_field(4) - # Specifies a collection of RetryHostPredicates that will be consulted when - # selecting a host for retries. If any of the predicates reject the host, - # host selection will be reattempted. Refer to :ref:`retry plugin - # configuration ` for more details. - retry_host_predicate: List[ - "RetryPolicyRetryHostPredicate" - ] = betterproto.message_field(5) - # Retry options predicates that will be applied prior to retrying a request. - # These predicates allow customizing request behavior between retries. 
- # [#comment: add [#extension-category: envoy.retry_options_predicates] when - # there are built-in extensions] - retry_options_predicates: List[ - "__core_v3__.TypedExtensionConfig" - ] = betterproto.message_field(12) - # The maximum number of times host selection will be reattempted before - # giving up, at which point the host that was last selected will be routed - # to. If unspecified, this will default to retrying once. - host_selection_retry_max_attempts: int = betterproto.int64_field(6) - # HTTP status codes that should trigger a retry in addition to those - # specified by retry_on. - retriable_status_codes: List[int] = betterproto.uint32_field(7) - # Specifies parameters that control exponential retry back off. This - # parameter is optional, in which case the default base interval is 25 - # milliseconds or, if set, the current value of the - # `upstream.base_retry_backoff_ms` runtime parameter. The default maximum - # interval is 10 times the base interval. The documentation for - # :ref:`config_http_filters_router_x-envoy-max-retries` describes Envoy's - # back-off algorithm. - retry_back_off: "RetryPolicyRetryBackOff" = betterproto.message_field(8) - # Specifies parameters that control a retry back-off strategy that is used - # when the request is rate limited by the upstream server. The server may - # return a response header like ``Retry-After`` or ``X-RateLimit-Reset`` to - # provide feedback to the client on how long to wait before retrying. If - # configured, this back-off strategy will be used instead of the default - # exponential back off strategy (configured using `retry_back_off`) whenever - # a response includes the matching headers. - rate_limited_retry_back_off: "RetryPolicyRateLimitedRetryBackOff" = ( - betterproto.message_field(11) - ) - # HTTP response headers that trigger a retry if present in the response. A - # retry will be triggered if any of the header matches match the upstream - # response headers. The field is only consulted if 'retriable-headers' retry - # policy is active. - retriable_headers: List["HeaderMatcher"] = betterproto.message_field(9) - # HTTP headers which must be present in the request for retries to be - # attempted. - retriable_request_headers: List["HeaderMatcher"] = betterproto.message_field(10) - - -@dataclass(eq=False, repr=False) -class RetryPolicyRetryPriority(betterproto.Message): - name: str = betterproto.string_field(1) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - -@dataclass(eq=False, repr=False) -class RetryPolicyRetryHostPredicate(betterproto.Message): - name: str = betterproto.string_field(1) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - -@dataclass(eq=False, repr=False) -class RetryPolicyRetryBackOff(betterproto.Message): - # Specifies the base interval between retries. This parameter is required and - # must be greater than zero. Values less than 1 ms are rounded up to 1 ms. - # See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion - # of Envoy's back-off algorithm. - base_interval: timedelta = betterproto.message_field(1) - # Specifies the maximum interval between retries. This parameter is optional, - # but must be greater than or equal to the `base_interval` if set. The - # default is 10 times the `base_interval`. See - # :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of - # Envoy's back-off algorithm. 
- max_interval: timedelta = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class RetryPolicyResetHeader(betterproto.Message): - # The name of the reset header. .. note:: If the header appears multiple - # times only the first value is used. - name: str = betterproto.string_field(1) - # The format of the reset header. - format: "RetryPolicyResetHeaderFormat" = betterproto.enum_field(2) - - -@dataclass(eq=False, repr=False) -class RetryPolicyRateLimitedRetryBackOff(betterproto.Message): - """ - A retry back-off strategy that applies when the upstream server rate limits - the request. Given this configuration: .. code-block:: yaml - rate_limited_retry_back_off: reset_headers: - name: Retry-After - format: SECONDS - name: X-RateLimit-Reset format: UNIX_TIMESTAMP - max_interval: "300s" The following algorithm will apply: 1. If the - response contains the header ``Retry-After`` its value must be on the - form ``120`` (an integer that represents the number of seconds to wait - before retrying). If so, this value is used as the back-off interval. 2. - Otherwise, if the response contains the header ``X-RateLimit-Reset`` its - value must be on the form ``1595320702`` (an integer that represents the - point in time at which to retry, as a Unix timestamp in seconds). If so, - the current time is subtracted from this value and the result is used as - the back-off interval. 3. Otherwise, Envoy will use the default - :ref:`exponential back-off - ` - strategy. No matter which format is used, if the resulting back-off - interval exceeds ``max_interval`` it is discarded and the next header in - ``reset_headers`` is tried. If a request timeout is configured for the - route it will further limit how long the request will be allowed to run. To - prevent many clients retrying at the same point in time jitter is added to - the back-off interval, so the resulting interval is decided by taking: - ``random(interval, interval * 1.5)``. .. attention:: Configuring - ``rate_limited_retry_back_off`` will not by itself cause a request to be - retried. You will still need to configure the right retry policy to match - the responses from the upstream server. - """ - - # Specifies the reset headers (like ``Retry-After`` or ``X-RateLimit-Reset``) - # to match against the response. Headers are tried in order, and matched case - # insensitive. The first header to be parsed successfully is used. If no - # headers match the default exponential back-off is used instead. - reset_headers: List["RetryPolicyResetHeader"] = betterproto.message_field(1) - # Specifies the maximum back off interval that Envoy will allow. If a reset - # header contains an interval longer than this then it will be discarded and - # the next header will be tried. Defaults to 300 seconds. - max_interval: timedelta = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class HedgePolicy(betterproto.Message): - """ - HTTP request hedging :ref:`architecture overview - `. - """ - - # Specifies the number of initial requests that should be sent upstream. Must - # be at least 1. Defaults to 1. [#not-implemented-hide:] - initial_requests: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # Specifies a probability that an additional upstream request should be sent - # on top of what is specified by initial_requests. Defaults to 0. 
[#not- - # implemented-hide:] - additional_request_chance: "___type_v3__.FractionalPercent" = ( - betterproto.message_field(2) - ) - # Indicates that a hedged request should be sent when the per-try timeout is - # hit. This means that a retry will be issued without resetting the original - # request, leaving multiple upstream requests in flight. The first request to - # complete successfully will be the one returned to the caller. * At any - # time, a successful response (i.e. not triggering any of the retry-on - # conditions) would be returned to the client. * Before per-try timeout, an - # error response (per retry-on conditions) would be retried immediately or - # returned ot the client if there are no more retries left. * After per-try - # timeout, an error response would be discarded, as a retry in the form of a - # hedged request is already in progress. Note: For this to have effect, you - # must have a :ref:`RetryPolicy - # ` that retries at least one - # error code and specifies a maximum number of retries. Defaults to false. - hedge_on_per_try_timeout: bool = betterproto.bool_field(3) - - -@dataclass(eq=False, repr=False) -class RedirectAction(betterproto.Message): - """[#next-free-field: 10]""" - - # The scheme portion of the URL will be swapped with "https". - https_redirect: bool = betterproto.bool_field(4, group="scheme_rewrite_specifier") - # The scheme portion of the URL will be swapped with this value. - scheme_redirect: str = betterproto.string_field(7, group="scheme_rewrite_specifier") - # The host portion of the URL will be swapped with this value. - host_redirect: str = betterproto.string_field(1) - # The port value of the URL will be swapped with this value. - port_redirect: int = betterproto.uint32_field(8) - # The path portion of the URL will be swapped with this value. Please note - # that query string in path_redirect will override the request's query string - # and will not be stripped. For example, let's say we have the following - # routes: - match: { path: "/old-path-1" } redirect: { path_redirect: - # "/new-path-1" } - match: { path: "/old-path-2" } redirect: { - # path_redirect: "/new-path-2", strip-query: "true" } - match: { path: "/old- - # path-3" } redirect: { path_redirect: "/new-path-3?foo=1", strip_query: - # "true" } 1. if request uri is "/old-path-1?bar=1", users will be redirected - # to "/new-path-1?bar=1" 2. if request uri is "/old-path-2?bar=1", users will - # be redirected to "/new-path-2" 3. if request uri is "/old-path-3?bar=1", - # users will be redirected to "/new-path-3?foo=1" - path_redirect: str = betterproto.string_field(2, group="path_rewrite_specifier") - # Indicates that during redirection, the matched prefix (or path) should be - # swapped with this value. This option allows redirect URLs be dynamically - # created based on the request. .. attention:: Pay attention to the use of - # trailing slashes as mentioned in :ref:`RouteAction's prefix_rewrite - # `. - prefix_rewrite: str = betterproto.string_field(5, group="path_rewrite_specifier") - # Indicates that during redirect, portions of the path that match the pattern - # should be rewritten, even allowing the substitution of capture groups from - # the pattern into the new path as specified by the rewrite substitution - # string. This is useful to allow application paths to be rewritten in a way - # that is aware of segments with variable content like identifiers. 
Examples - # using Google's `RE2 `_ engine: * The path - # pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution string of - # ``\2/instance/\1`` would transform ``/service/foo/v1/api`` into - # ``/v1/api/instance/foo``. * The pattern ``one`` paired with a substitution - # string of ``two`` would transform ``/xxx/one/yyy/one/zzz`` into - # ``/xxx/two/yyy/two/zzz``. * The pattern ``^(.*?)one(.*)$`` paired with a - # substitution string of ``\1two\2`` would replace only the first - # occurrence of ``one``, transforming path ``/xxx/one/yyy/one/zzz`` into - # ``/xxx/two/yyy/one/zzz``. * The pattern ``(?i)/xxx/`` paired with a - # substitution string of ``/yyy/`` would do a case-insensitive match and - # transform path ``/aaa/XxX/bbb`` to ``/aaa/yyy/bbb``. - regex_rewrite: "___type_matcher_v3__.RegexMatchAndSubstitute" = ( - betterproto.message_field(9, group="path_rewrite_specifier") - ) - # The HTTP status code to use in the redirect response. The default response - # code is MOVED_PERMANENTLY (301). - response_code: "RedirectActionRedirectResponseCode" = betterproto.enum_field(3) - # Indicates that during redirection, the query portion of the URL will be - # removed. Default value is false. - strip_query: bool = betterproto.bool_field(6) - - -@dataclass(eq=False, repr=False) -class DirectResponseAction(betterproto.Message): - # Specifies the HTTP response status to be returned. - status: int = betterproto.uint32_field(1) - # Specifies the content of the response body. If this setting is omitted, no - # body is included in the generated response. .. note:: Headers can be - # specified using *response_headers_to_add* in the enclosing - # :ref:`envoy_v3_api_msg_config.route.v3.Route`, - # :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` or - # :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`. - body: "__core_v3__.DataSource" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class NonForwardingAction(betterproto.Message): - """[#not-implemented-hide:]""" - - pass - - -@dataclass(eq=False, repr=False) -class Decorator(betterproto.Message): - # The operation name associated with the request matched to this route. If - # tracing is enabled, this information will be used as the span name reported - # for this request. .. note:: For ingress (inbound) requests, or egress - # (outbound) responses, this value may be overridden by the :ref:`x-envoy- - # decorator-operation ` header. - operation: str = betterproto.string_field(1) - # Whether the decorated details should be propagated to the other party. The - # default is true. - propagate: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) - - -@dataclass(eq=False, repr=False) -class Tracing(betterproto.Message): - # Target percentage of requests managed by this HTTP connection manager that - # will be force traced if the :ref:`x-client-trace-id - # ` header is set. This field - # is a direct analog for the runtime variable 'tracing.client_sampling' in - # the :ref:`HTTP Connection Manager `. Default: - # 100% - client_sampling: "___type_v3__.FractionalPercent" = betterproto.message_field(1) - # Target percentage of requests managed by this HTTP connection manager that - # will be randomly selected for trace generation, if not requested by the - # client or not forced. This field is a direct analog for the runtime - # variable 'tracing.random_sampling' in the :ref:`HTTP Connection Manager - # `. 
Default: 100% - random_sampling: "___type_v3__.FractionalPercent" = betterproto.message_field(2) - # Target percentage of requests managed by this HTTP connection manager that - # will be traced after all other sampling checks have been applied (client- - # directed, force tracing, random sampling). This field functions as an upper - # limit on the total configured sampling rate. For instance, setting - # client_sampling to 100% but overall_sampling to 1% will result in only 1% - # of client requests with the appropriate headers to be force traced. This - # field is a direct analog for the runtime variable 'tracing.global_enabled' - # in the :ref:`HTTP Connection Manager `. - # Default: 100% - overall_sampling: "___type_v3__.FractionalPercent" = betterproto.message_field(3) - # A list of custom tags with unique tag name to create tags for the active - # span. It will take effect after merging with the :ref:`corresponding - # configuration ` configured in the - # HTTP connection manager. If two tags with the same name are configured each - # in the HTTP connection manager and the route level, the one configured here - # takes priority. - custom_tags: List["___type_tracing_v3__.CustomTag"] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class VirtualCluster(betterproto.Message): - """ - A virtual cluster is a way of specifying a regex matching rule against - certain important endpoints such that statistics are generated explicitly - for the matched requests. The reason this is useful is that when doing - prefix/path matching Envoy does not always know what the application - considers to be an endpoint. Thus, it’s impossible for Envoy to generically - emit per endpoint statistics. However, often systems have highly critical - endpoints that they wish to get “perfect” statistics on. Virtual cluster - statistics are perfect in the sense that they are emitted on the downstream - side such that they include network level failures. Documentation for - :ref:`virtual cluster statistics - `. .. note:: Virtual clusters - are a useful tool, but we do not recommend setting up a virtual cluster for - every application endpoint. This is both not easily maintainable and as - well the matching and statistics output are not free. - """ - - # Specifies a list of header matchers to use for matching requests. Each - # specified header must match. The pseudo-headers `:path` and `:method` can - # be used to match the request path and method, respectively. - headers: List["HeaderMatcher"] = betterproto.message_field(4) - # Specifies the name of the virtual cluster. The virtual cluster name as well - # as the virtual host name are used when emitting statistics. The statistics - # are emitted by the router filter and are documented :ref:`here - # `. - name: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class RateLimit(betterproto.Message): - """ - Global rate limiting :ref:`architecture overview - `. Also applies to Local rate limiting - :ref:`using descriptors - `. - """ - - # Refers to the stage set in the filter. The rate limit configuration only - # applies to filters with the same stage number. The default stage number is - # 0. .. note:: The filter supports a range of 0 - 10 inclusively for stage - # numbers. - stage: Optional[int] = betterproto.message_field(1, wraps=betterproto.TYPE_UINT32) - # The key to be set in runtime to disable this rate limit configuration. 
- disable_key: str = betterproto.string_field(2) - # A list of actions that are to be applied for this rate limit configuration. - # Order matters as the actions are processed sequentially and the descriptor - # is composed by appending descriptor entries in that sequence. If an action - # cannot append a descriptor entry, no descriptor is generated for the - # configuration. See :ref:`composing actions - # ` for additional - # documentation. - actions: List["RateLimitAction"] = betterproto.message_field(3) - # An optional limit override to be appended to the descriptor produced by - # this rate limit configuration. If the override value is invalid or cannot - # be resolved from metadata, no override is provided. See :ref:`rate limit - # override ` for more - # information. - limit: "RateLimitOverride" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class RateLimitAction(betterproto.Message): - """[#next-free-field: 10]""" - - # Rate limit on source cluster. - source_cluster: "RateLimitActionSourceCluster" = betterproto.message_field( - 1, group="action_specifier" - ) - # Rate limit on destination cluster. - destination_cluster: "RateLimitActionDestinationCluster" = ( - betterproto.message_field(2, group="action_specifier") - ) - # Rate limit on request headers. - request_headers: "RateLimitActionRequestHeaders" = betterproto.message_field( - 3, group="action_specifier" - ) - # Rate limit on remote address. - remote_address: "RateLimitActionRemoteAddress" = betterproto.message_field( - 4, group="action_specifier" - ) - # Rate limit on a generic key. - generic_key: "RateLimitActionGenericKey" = betterproto.message_field( - 5, group="action_specifier" - ) - # Rate limit on the existence of request headers. - header_value_match: "RateLimitActionHeaderValueMatch" = betterproto.message_field( - 6, group="action_specifier" - ) - # Rate limit on dynamic metadata. .. attention:: This field has been - # deprecated in favor of the :ref:`metadata - # ` field - dynamic_metadata: "RateLimitActionDynamicMetaData" = betterproto.message_field( - 7, group="action_specifier" - ) - # Rate limit on metadata. - metadata: "RateLimitActionMetaData" = betterproto.message_field( - 8, group="action_specifier" - ) - # Rate limit descriptor extension. See the rate limit descriptor extensions - # documentation. [#extension-category: envoy.rate_limit_descriptors] - extension: "__core_v3__.TypedExtensionConfig" = betterproto.message_field( - 9, group="action_specifier" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.dynamic_metadata: - warnings.warn( - "RateLimitAction.dynamic_metadata is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class RateLimitActionSourceCluster(betterproto.Message): - """ - The following descriptor entry is appended to the descriptor: .. code- - block:: cpp ("source_cluster", "") is derived from the :option:`--service-cluster` option. - """ - - pass - - -@dataclass(eq=False, repr=False) -class RateLimitActionDestinationCluster(betterproto.Message): - """ - The following descriptor entry is appended to the descriptor: .. code- - block:: cpp ("destination_cluster", "") Once a - request matches against a route table rule, a routed cluster is determined - by one of the following :ref:`route table configuration - ` settings: * - :ref:`cluster ` - indicates the upstream cluster to route to. * :ref:`weighted_clusters - ` - chooses a cluster randomly from a set of clusters with attributed weight. 
* - :ref:`cluster_header - ` indicates - which header in the request contains the target cluster. - """ - - pass - - -@dataclass(eq=False, repr=False) -class RateLimitActionRequestHeaders(betterproto.Message): - """ - The following descriptor entry is appended when a header contains a key - that matches the *header_name*: .. code-block:: cpp ("", - "") - """ - - # The header name to be queried from the request headers. The header’s value - # is used to populate the value of the descriptor entry for the - # descriptor_key. - header_name: str = betterproto.string_field(1) - # The key to use in the descriptor entry. - descriptor_key: str = betterproto.string_field(2) - # If set to true, Envoy skips the descriptor while calling rate limiting - # service when header is not present in the request. By default it skips - # calling the rate limiting service if this header is not present in the - # request. - skip_if_absent: bool = betterproto.bool_field(3) - - -@dataclass(eq=False, repr=False) -class RateLimitActionRemoteAddress(betterproto.Message): - """ - The following descriptor entry is appended to the descriptor and is - populated using the trusted address from :ref:`x-forwarded-for - `: .. code-block:: cpp - ("remote_address", "") - """ - - pass - - -@dataclass(eq=False, repr=False) -class RateLimitActionGenericKey(betterproto.Message): - """ - The following descriptor entry is appended to the descriptor: .. code- - block:: cpp ("generic_key", "") - """ - - # The value to use in the descriptor entry. - descriptor_value: str = betterproto.string_field(1) - # An optional key to use in the descriptor entry. If not set it defaults to - # 'generic_key' as the descriptor key. - descriptor_key: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class RateLimitActionHeaderValueMatch(betterproto.Message): - """ - The following descriptor entry is appended to the descriptor: .. code- - block:: cpp ("header_match", "") - """ - - # The value to use in the descriptor entry. - descriptor_value: str = betterproto.string_field(1) - # If set to true, the action will append a descriptor entry when the request - # matches the headers. If set to false, the action will append a descriptor - # entry when the request does not match the headers. The default value is - # true. - expect_match: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) - # Specifies a set of headers that the rate limit action should match on. The - # action will check the request’s headers against all the specified headers - # in the config. A match will happen if all the headers in the config are - # present in the request with the same values (or based on presence if the - # value field is not in the config). - headers: List["HeaderMatcher"] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class RateLimitActionDynamicMetaData(betterproto.Message): - """ - The following descriptor entry is appended when the :ref:`dynamic metadata - ` contains a key value: .. code-block:: cpp - ("", "") .. - attention:: This action has been deprecated in favor of the - :ref:`metadata - ` action - """ - - # The key to use in the descriptor entry. - descriptor_key: str = betterproto.string_field(1) - # Metadata struct that defines the key and path to retrieve the string value. - # A match will only happen if the value in the dynamic metadata is of type - # string. 
- metadata_key: "___type_metadata_v3__.MetadataKey" = betterproto.message_field(2) - # An optional value to use if *metadata_key* is empty. If not set and no - # value is present under the metadata_key then no descriptor is generated. - default_value: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class RateLimitActionMetaData(betterproto.Message): - """ - The following descriptor entry is appended when the metadata contains a key - value: .. code-block:: cpp ("", - "") - """ - - # The key to use in the descriptor entry. - descriptor_key: str = betterproto.string_field(1) - # Metadata struct that defines the key and path to retrieve the string value. - # A match will only happen if the value in the metadata is of type string. - metadata_key: "___type_metadata_v3__.MetadataKey" = betterproto.message_field(2) - # An optional value to use if *metadata_key* is empty. If not set and no - # value is present under the metadata_key then no descriptor is generated. - default_value: str = betterproto.string_field(3) - # Source of metadata - source: "RateLimitActionMetaDataSource" = betterproto.enum_field(4) - - -@dataclass(eq=False, repr=False) -class RateLimitOverride(betterproto.Message): - # Limit override from dynamic metadata. - dynamic_metadata: "RateLimitOverrideDynamicMetadata" = betterproto.message_field( - 1, group="override_specifier" - ) - - -@dataclass(eq=False, repr=False) -class RateLimitOverrideDynamicMetadata(betterproto.Message): - """Fetches the override from the dynamic metadata.""" - - # Metadata struct that defines the key and path to retrieve the struct value. - # The value must be a struct containing an integer "requests_per_unit" - # property and a "unit" property with a value parseable to - # :ref:`RateLimitUnit enum ` - metadata_key: "___type_metadata_v3__.MetadataKey" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class HeaderMatcher(betterproto.Message): - """ - .. attention:: Internally, Envoy always uses the HTTP/2 *:authority* - header to represent the HTTP/1 *Host* header. Thus, if attempting to - match on *Host*, match on *:authority* instead. .. attention:: To route - on HTTP method, use the special HTTP/2 *:method* header. This works for - both HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., .. code- - block:: json { "name": ":method", "exact_match": "POST" - } .. attention:: In the absence of any header match specifier, match will - default to :ref:`present_match - `. i.e, a - request that has the :ref:`name - ` header will match, - regardless of the header's value. [#next-major-version: HeaderMatcher - should be refactored to use StringMatcher.] [#next-free-field: 14] - """ - - # Specifies the name of the header in the request. - name: str = betterproto.string_field(1) - # If specified, header match will be performed based on the value of the - # header. This field is deprecated. Please use :ref:`string_match - # `. - exact_match: str = betterproto.string_field(4, group="header_match_specifier") - # If specified, this regex string is a regular expression rule which implies - # the entire request header value must match the regex. The rule will not - # match if only a subsequence of the request header value matches the regex. - # This field is deprecated. Please use :ref:`string_match - # `. - safe_regex_match: "___type_matcher_v3__.RegexMatcher" = betterproto.message_field( - 11, group="header_match_specifier" - ) - # If specified, header match will be performed based on range. 
The rule will - # match if the request header value is within this range. The entire request - # header value must represent an integer in base 10 notation: consisting of - # an optional plus or minus sign followed by a sequence of digits. The rule - # will not match if the header value does not represent an integer. Match - # will fail for empty values, floating point numbers or if only a subsequence - # of the header value is an integer. Examples: * For range [-10,0), route - # will match for header value -1, but not for 0, "somestring", 10.9, - # "-1somestring" - range_match: "___type_v3__.Int64Range" = betterproto.message_field( - 6, group="header_match_specifier" - ) - # If specified as true, header match will be performed based on whether the - # header is in the request. If specified as false, header match will be - # performed based on whether the header is absent. - present_match: bool = betterproto.bool_field(7, group="header_match_specifier") - # If specified, header match will be performed based on the prefix of the - # header value. Note: empty prefix is not allowed, please use present_match - # instead. This field is deprecated. Please use :ref:`string_match - # `. Examples: - # * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - prefix_match: str = betterproto.string_field(9, group="header_match_specifier") - # If specified, header match will be performed based on the suffix of the - # header value. Note: empty suffix is not allowed, please use present_match - # instead. This field is deprecated. Please use :ref:`string_match - # `. Examples: - # * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - suffix_match: str = betterproto.string_field(10, group="header_match_specifier") - # If specified, header match will be performed based on whether the header - # value contains the given value or not. Note: empty contains match is not - # allowed, please use present_match instead. This field is deprecated. Please - # use :ref:`string_match - # `. Examples: - # * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*. - contains_match: str = betterproto.string_field(12, group="header_match_specifier") - # If specified, header match will be performed based on the string match of - # the header value. - string_match: "___type_matcher_v3__.StringMatcher" = betterproto.message_field( - 13, group="header_match_specifier" - ) - # If specified, the match result will be inverted before checking. Defaults - # to false. Examples: * The regex ``\d{3}`` does not match the value *1234*, - # so it will match when inverted. * The range [-10,0) will match the value - # -1, so it will not match when inverted. 
- invert_match: bool = betterproto.bool_field(8) - - def __post_init__(self) -> None: - super().__post_init__() - if self.exact_match: - warnings.warn("HeaderMatcher.exact_match is deprecated", DeprecationWarning) - if self.safe_regex_match: - warnings.warn( - "HeaderMatcher.safe_regex_match is deprecated", DeprecationWarning - ) - if self.prefix_match: - warnings.warn( - "HeaderMatcher.prefix_match is deprecated", DeprecationWarning - ) - if self.suffix_match: - warnings.warn( - "HeaderMatcher.suffix_match is deprecated", DeprecationWarning - ) - if self.contains_match: - warnings.warn( - "HeaderMatcher.contains_match is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class QueryParameterMatcher(betterproto.Message): - """ - Query parameter matching treats the query string of a request's :path - header as an ampersand-separated list of keys and/or key=value elements. - [#next-free-field: 7] - """ - - # Specifies the name of a key that must be present in the requested *path*'s - # query string. - name: str = betterproto.string_field(1) - # Specifies whether a query parameter value should match against a string. - string_match: "___type_matcher_v3__.StringMatcher" = betterproto.message_field( - 5, group="query_parameter_match_specifier" - ) - # Specifies whether a query parameter should be present. - present_match: bool = betterproto.bool_field( - 6, group="query_parameter_match_specifier" - ) - - -@dataclass(eq=False, repr=False) -class InternalRedirectPolicy(betterproto.Message): - """ - HTTP Internal Redirect :ref:`architecture overview - `. - """ - - # An internal redirect is not handled, unless the number of previous internal - # redirects that a downstream request has encountered is lower than this - # value. In the case where a downstream request is bounced among multiple - # routes by internal redirect, the first route that hits this threshold, or - # does not set :ref:`internal_redirect_policy - # ` - # will pass the redirect back to downstream. If not specified, at most one - # redirect will be followed. - max_internal_redirects: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # Defines what upstream response codes are allowed to trigger internal - # redirect. If unspecified, only 302 will be treated as internal redirect. - # Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be - # ignored. - redirect_response_codes: List[int] = betterproto.uint32_field(2) - # Specifies a list of predicates that are queried when an upstream response - # is deemed to trigger an internal redirect by all other criteria. Any - # predicate in the list can reject the redirect, causing the response to be - # proxied to downstream. [#extension-category: - # envoy.internal_redirect_predicates] - predicates: List["__core_v3__.TypedExtensionConfig"] = betterproto.message_field(3) - # Allow internal redirect to follow a target URI with a different scheme than - # the value of x-forwarded-proto. The default is false. - allow_cross_scheme_redirect: bool = betterproto.bool_field(4) - - -@dataclass(eq=False, repr=False) -class FilterConfig(betterproto.Message): - """ - A simple wrapper for an HTTP filter config. This is intended to be used as - a wrapper for the map value in :ref:`VirtualHost.typed_per_filter_config`, :re - f:`Route.typed_per_filter_config`, or :ref:`WeightedCluster.ClusterWeight.typed_per_ - filter_config` to add additional flags to the filter. [#not- - implemented-hide:] - """ - - # The filter config. 
- config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(1) - # If true, the filter is optional, meaning that if the client does not - # support the specified filter, it may ignore the map entry rather than - # rejecting the config. - is_optional: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class RouteConfiguration(betterproto.Message): - """[#next-free-field: 13]""" - - # The name of the route configuration. For example, it might match - # :ref:`route_config_name ` in :ref:`envoy_v3_api_msg_ex - # tensions.filters.network.http_connection_manager.v3.Rds`. - name: str = betterproto.string_field(1) - # An array of virtual hosts that make up the route table. - virtual_hosts: List["VirtualHost"] = betterproto.message_field(2) - # An array of virtual hosts will be dynamically loaded via the VHDS API. Both - # *virtual_hosts* and *vhds* fields will be used when present. - # *virtual_hosts* can be used for a base routing table or for infrequently - # changing virtual hosts. *vhds* is used for on-demand discovery of virtual - # hosts. The contents of these two fields will be merged to generate a - # routing table for a given RouteConfiguration, with *vhds* derived - # configuration taking precedence. - vhds: "Vhds" = betterproto.message_field(9) - # Optionally specifies a list of HTTP headers that the connection manager - # will consider to be internal only. If they are found on external requests - # they will be cleaned prior to filter invocation. See - # :ref:`config_http_conn_man_headers_x-envoy-internal` for more information. - internal_only_headers: List[str] = betterproto.string_field(3) - # Specifies a list of HTTP headers that should be added to each response that - # the connection manager encodes. Headers specified at this level are applied - # after headers from any enclosed - # :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or - # :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, - # including details on header value syntax, see the documentation on - # :ref:`custom request headers - # `. - response_headers_to_add: List[ - "__core_v3__.HeaderValueOption" - ] = betterproto.message_field(4) - # Specifies a list of HTTP headers that should be removed from each response - # that the connection manager encodes. - response_headers_to_remove: List[str] = betterproto.string_field(5) - # Specifies a list of HTTP headers that should be added to each request - # routed by the HTTP connection manager. Headers specified at this level are - # applied after headers from any enclosed - # :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or - # :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, - # including details on header value syntax, see the documentation on - # :ref:`custom request headers - # `. - request_headers_to_add: List[ - "__core_v3__.HeaderValueOption" - ] = betterproto.message_field(6) - # Specifies a list of HTTP headers that should be removed from each request - # routed by the HTTP connection manager. - request_headers_to_remove: List[str] = betterproto.string_field(8) - # By default, headers that should be added/removed are evaluated from most to - # least specific: * route level * virtual host level * connection manager - # level To allow setting overrides at the route or virtual host level, this - # order can be reversed by setting this option to true. Defaults to false. - # [#next-major-version: In the v3 API, this will default to true.] 
- most_specific_header_mutations_wins: bool = betterproto.bool_field(10) - # An optional boolean that specifies whether the clusters that the route - # table refers to will be validated by the cluster manager. If set to true - # and a route refers to a non-existent cluster, the route table will not - # load. If set to false and a route refers to a non-existent cluster, the - # route table will load and the router filter will return a 404 if the route - # is selected at runtime. This setting defaults to true if the route table is - # statically defined via the :ref:`route_config ` option. This setting default to false if the route table is loaded - # dynamically via the :ref:`rds ` option. Users may - # wish to override the default behavior in certain cases (for example when - # using CDS with a static route table). - validate_clusters: Optional[bool] = betterproto.message_field( - 7, wraps=betterproto.TYPE_BOOL - ) - # The maximum bytes of the response :ref:`direct response body - # ` size. If - # not specified the default is 4096. .. warning:: Envoy currently holds the - # content of :ref:`direct response body - # ` in memory. - # Be careful setting this to be larger than the default 4KB, since the - # allocated memory for direct response body is not subject to data plane - # buffering controls. - max_direct_response_body_size_bytes: Optional[int] = betterproto.message_field( - 11, wraps=betterproto.TYPE_UINT32 - ) - # [#not-implemented-hide:] A list of plugins and their configurations which - # may be used by a :ref:`envoy_v3_api_field_config.route.v3.RouteAction.clust - # er_specifier_plugin` within the route. All *extension.name* fields in this - # list must be unique. - cluster_specifier_plugins: List[ - "ClusterSpecifierPlugin" - ] = betterproto.message_field(12) - - -@dataclass(eq=False, repr=False) -class ClusterSpecifierPlugin(betterproto.Message): - """Configuration for a cluster specifier plugin.""" - - # The name of the plugin and its opaque configuration. - extension: "__core_v3__.TypedExtensionConfig" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class Vhds(betterproto.Message): - # Configuration source specifier for VHDS. - config_source: "__core_v3__.ConfigSource" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ScopedRouteConfiguration(betterproto.Message): - """ - Specifies a routing scope, which associates a - :ref:`Key` - to a :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. The - :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` can be obtained - dynamically via RDS (:ref:`route_configuration_name`) or - specified inline (:ref:`route_configuration`). The HTTP connection - manager builds up a table consisting of these Key to RouteConfiguration - mappings, and looks up the RouteConfiguration to use per request according - to the algorithm specified in the :ref:`scope_key_builder` assigned to the HttpConnectionManager. For example, with the - following configurations (in YAML): HttpConnectionManager config: .. code:: - ... scoped_routes: name: foo-scoped-routes scope_key_builder: - fragments: - header_value_extractor: name: X-Route- - Selector element_separator: , element: - separator: = key: vip ScopedRouteConfiguration resources - (specified statically via :ref:`scoped_route_configurations_list` or obtained dynamically via SRDS): .. 
- code:: (1) name: route-scope1 route_configuration_name: route-config1 - key: fragments: - string_key: 172.10.10.20 (2) name: route- - scope2 route_configuration_name: route-config2 key: fragments: - - string_key: 172.20.20.30 A request from a client such as: .. code:: - GET / HTTP/1.1 Host: foo.com X-Route-Selector: vip=172.10.10.20 - would result in the routing table defined by the `route-config1` - RouteConfiguration being assigned to the HTTP request/stream. [#next-free- - field: 6] - """ - - # Whether the RouteConfiguration should be loaded on demand. - on_demand: bool = betterproto.bool_field(4) - # The name assigned to the routing scope. - name: str = betterproto.string_field(1) - # The resource name to use for a - # :ref:`envoy_v3_api_msg_service.discovery.v3.DiscoveryRequest` to an RDS - # server to fetch the - # :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated with - # this scope. - route_configuration_name: str = betterproto.string_field(2) - # The :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated - # with the scope. - route_configuration: "RouteConfiguration" = betterproto.message_field(5) - # The key to match against. - key: "ScopedRouteConfigurationKey" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ScopedRouteConfigurationKey(betterproto.Message): - """ - Specifies a key which is matched against the output of the :ref:`scope_key_ - builder` specified in the - HttpConnectionManager. The matching is done per HTTP request and is - dependent on the order of the fragments contained in the Key. - """ - - # The ordered set of fragments to match against. The order must match the - # fragments in the corresponding :ref:`scope_key_builder`. - fragments: List["ScopedRouteConfigurationKeyFragment"] = betterproto.message_field( - 1 - ) - - -@dataclass(eq=False, repr=False) -class ScopedRouteConfigurationKeyFragment(betterproto.Message): - # A string to match against. - string_key: str = betterproto.string_field(1, group="type") - - -from .....xds.type.matcher import v3 as ____xds_type_matcher_v3__ -from ....type import v3 as ___type_v3__ -from ....type.matcher import v3 as ___type_matcher_v3__ -from ....type.metadata import v3 as ___type_metadata_v3__ -from ....type.tracing import v3 as ___type_tracing_v3__ -from ...core import v3 as __core_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/tap/__init__.py b/src/envoy_data_plane/envoy/config/tap/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/tap/v3/__init__.py b/src/envoy_data_plane/envoy/config/tap/v3/__init__.py deleted file mode 100644 index deba3d5..0000000 --- a/src/envoy_data_plane/envoy/config/tap/v3/__init__.py +++ /dev/null @@ -1,241 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/tap/v3/common.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class OutputSinkFormat(betterproto.Enum): - JSON_BODY_AS_BYTES = 0 - JSON_BODY_AS_STRING = 1 - PROTO_BINARY = 2 - PROTO_BINARY_LENGTH_DELIMITED = 3 - PROTO_TEXT = 4 - - -@dataclass(eq=False, repr=False) -class TapConfig(betterproto.Message): - """Tap configuration.""" - - # The match configuration. 
If the configuration matches the data source being - # tapped, a tap will occur, with the result written to the configured output. - # Exactly one of :ref:`match - # ` and :ref:`match_config - # ` must be set. If - # both are set, the :ref:`match - # ` will be used. - match_config: "MatchPredicate" = betterproto.message_field(1) - # The match configuration. If the configuration matches the data source being - # tapped, a tap will occur, with the result written to the configured output. - # Exactly one of :ref:`match - # ` and :ref:`match_config - # ` must be set. If - # both are set, the :ref:`match - # ` will be used. - match: "__common_matcher_v3__.MatchPredicate" = betterproto.message_field(4) - # The tap output configuration. If a match configuration matches a data - # source being tapped, a tap will occur and the data will be written to the - # configured output. - output_config: "OutputConfig" = betterproto.message_field(2) - # [#not-implemented-hide:] Specify if Tap matching is enabled. The % of - # requests\connections for which the tap matching is enabled. When not - # enabled, the request\connection will not be recorded. .. note:: This - # field defaults to 100/:ref:`HUNDRED - # `. - tap_enabled: "__core_v3__.RuntimeFractionalPercent" = betterproto.message_field(3) - - def __post_init__(self) -> None: - super().__post_init__() - if self.match_config: - warnings.warn("TapConfig.match_config is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class MatchPredicate(betterproto.Message): - """ - Tap match configuration. This is a recursive structure which allows complex - nested match configurations to be built using various logical operators. - [#next-free-field: 11] - """ - - # A set that describes a logical OR. If any member of the set matches, the - # match configuration matches. - or_match: "MatchPredicateMatchSet" = betterproto.message_field(1, group="rule") - # A set that describes a logical AND. If all members of the set match, the - # match configuration matches. - and_match: "MatchPredicateMatchSet" = betterproto.message_field(2, group="rule") - # A negation match. The match configuration will match if the negated match - # condition matches. - not_match: "MatchPredicate" = betterproto.message_field(3, group="rule") - # The match configuration will always match. - any_match: bool = betterproto.bool_field(4, group="rule") - # HTTP request headers match configuration. - http_request_headers_match: "HttpHeadersMatch" = betterproto.message_field( - 5, group="rule" - ) - # HTTP request trailers match configuration. - http_request_trailers_match: "HttpHeadersMatch" = betterproto.message_field( - 6, group="rule" - ) - # HTTP response headers match configuration. - http_response_headers_match: "HttpHeadersMatch" = betterproto.message_field( - 7, group="rule" - ) - # HTTP response trailers match configuration. - http_response_trailers_match: "HttpHeadersMatch" = betterproto.message_field( - 8, group="rule" - ) - # HTTP request generic body match configuration. - http_request_generic_body_match: "HttpGenericBodyMatch" = betterproto.message_field( - 9, group="rule" - ) - # HTTP response generic body match configuration. - http_response_generic_body_match: "HttpGenericBodyMatch" = ( - betterproto.message_field(10, group="rule") - ) - - -@dataclass(eq=False, repr=False) -class MatchPredicateMatchSet(betterproto.Message): - """A set of match configurations used for logical operations.""" - - # The list of rules that make up the set. 
- rules: List["MatchPredicate"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class HttpHeadersMatch(betterproto.Message): - """HTTP headers match configuration.""" - - # HTTP headers to match. - headers: List["__route_v3__.HeaderMatcher"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class HttpGenericBodyMatch(betterproto.Message): - """ - HTTP generic body match configuration. List of text strings and hex strings - to be located in HTTP body. All specified strings must be found in the HTTP - body for positive match. The search may be limited to specified number of - bytes from the body start. .. attention:: Searching for patterns in HTTP - body is potentially cpu intensive. For each specified pattern, http body is - scanned byte by byte to find a match. If multiple patterns are specified, - the process is repeated for each pattern. If location of a pattern is - known, ``bytes_limit`` should be specified to scan only part of the http - body. - """ - - # Limits search to specified number of bytes - default zero (no limit - match - # entire captured buffer). - bytes_limit: int = betterproto.uint32_field(1) - # List of patterns to match. - patterns: List["HttpGenericBodyMatchGenericTextMatch"] = betterproto.message_field( - 2 - ) - - -@dataclass(eq=False, repr=False) -class HttpGenericBodyMatchGenericTextMatch(betterproto.Message): - # Text string to be located in HTTP body. - string_match: str = betterproto.string_field(1, group="rule") - # Sequence of bytes to be located in HTTP body. - binary_match: bytes = betterproto.bytes_field(2, group="rule") - - -@dataclass(eq=False, repr=False) -class OutputConfig(betterproto.Message): - """Tap output configuration.""" - - # Output sinks for tap data. Currently a single sink is allowed in the list. - # Once multiple sink types are supported this constraint will be relaxed. - sinks: List["OutputSink"] = betterproto.message_field(1) - # For buffered tapping, the maximum amount of received body that will be - # buffered prior to truncation. If truncation occurs, the :ref:`truncated - # ` field will be set. If not - # specified, the default is 1KiB. - max_buffered_rx_bytes: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # For buffered tapping, the maximum amount of transmitted body that will be - # buffered prior to truncation. If truncation occurs, the :ref:`truncated - # ` field will be set. If not - # specified, the default is 1KiB. - max_buffered_tx_bytes: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - # Indicates whether taps produce a single buffered message per tap, or - # multiple streamed messages per tap in the emitted :ref:`TraceWrapper - # ` messages. Note that streamed - # tapping does not mean that no buffering takes place. Buffering may be - # required if data is processed before a match can be determined. See the - # HTTP tap filter :ref:`streaming ` - # documentation for more information. - streaming: bool = betterproto.bool_field(4) - - -@dataclass(eq=False, repr=False) -class OutputSink(betterproto.Message): - """Tap output sink configuration.""" - - # Sink output format. - format: "OutputSinkFormat" = betterproto.enum_field(1) - # Tap output will be streamed out the :http:post:`/tap` admin endpoint. .. - # attention:: It is only allowed to specify the streaming admin output sink - # if the tap is being configured from the :http:post:`/tap` admin endpoint. 
- # Thus, if an extension has been configured to receive tap configuration - # from some other source (e.g., static file, XDS, etc.) configuring the - # streaming admin output type will fail. - streaming_admin: "StreamingAdminSink" = betterproto.message_field( - 2, group="output_sink_type" - ) - # Tap output will be written to a file per tap sink. - file_per_tap: "FilePerTapSink" = betterproto.message_field( - 3, group="output_sink_type" - ) - # [#not-implemented-hide:] GrpcService to stream data to. The format argument - # must be PROTO_BINARY. [#comment: TODO(samflattery): remove cleanup in - # uber_per_filter.cc once implemented] - streaming_grpc: "StreamingGrpcSink" = betterproto.message_field( - 4, group="output_sink_type" - ) - - -@dataclass(eq=False, repr=False) -class StreamingAdminSink(betterproto.Message): - """Streaming admin sink configuration.""" - - pass - - -@dataclass(eq=False, repr=False) -class FilePerTapSink(betterproto.Message): - """ - The file per tap sink outputs a discrete file for every tapped stream. - """ - - # Path prefix. The output file will be of the form _.pb, - # where is an identifier distinguishing the recorded trace for stream - # instances (the Envoy connection ID, HTTP stream ID, etc.). - path_prefix: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class StreamingGrpcSink(betterproto.Message): - """ - [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps - to an external gRPC server. - """ - - # Opaque identifier, that will be sent back to the streaming grpc server. - tap_id: str = betterproto.string_field(1) - # The gRPC server that hosts the Tap Sink Service. - grpc_service: "__core_v3__.GrpcService" = betterproto.message_field(2) - - -from ...common.matcher import v3 as __common_matcher_v3__ -from ...core import v3 as __core_v3__ -from ...route import v3 as __route_v3__ diff --git a/src/envoy_data_plane/envoy/config/trace/__init__.py b/src/envoy_data_plane/envoy/config/trace/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/trace/v2/__init__.py b/src/envoy_data_plane/envoy/config/trace/v2/__init__.py deleted file mode 100644 index 7aebcd9..0000000 --- a/src/envoy_data_plane/envoy/config/trace/v2/__init__.py +++ /dev/null @@ -1,223 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/trace/v2/datadog.proto, envoy/config/trace/v2/dynamic_ot.proto, envoy/config/trace/v2/http_tracer.proto, envoy/config/trace/v2/lightstep.proto, envoy/config/trace/v2/opencensus.proto, envoy/config/trace/v2/service.proto, envoy/config/trace/v2/trace.proto, envoy/config/trace/v2/zipkin.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class OpenCensusConfigTraceContext(betterproto.Enum): - NONE = 0 - TRACE_CONTEXT = 1 - GRPC_TRACE_BIN = 2 - CLOUD_TRACE_CONTEXT = 3 - B3 = 4 - - -class LightstepConfigPropagationMode(betterproto.Enum): - ENVOY = 0 - LIGHTSTEP = 1 - B3 = 2 - TRACE_CONTEXT = 3 - - -class ZipkinConfigCollectorEndpointVersion(betterproto.Enum): - HTTP_JSON_V1 = 0 - HTTP_JSON = 1 - HTTP_PROTO = 2 - GRPC = 3 - - -@dataclass(eq=False, repr=False) -class OpenCensusConfig(betterproto.Message): - """ - Configuration for the OpenCensus tracer. [#next-free-field: 15] - [#extension: envoy.tracers.opencensus] - """ - - # Configures tracing, e.g. 
the sampler, max number of annotations, etc. - trace_config: "____opencensus_proto_trace_v1__.TraceConfig" = ( - betterproto.message_field(1) - ) - # Enables the stdout exporter if set to true. This is intended for debugging - # purposes. - stdout_exporter_enabled: bool = betterproto.bool_field(2) - # Enables the Stackdriver exporter if set to true. The project_id must also - # be set. - stackdriver_exporter_enabled: bool = betterproto.bool_field(3) - # The Cloud project_id to use for Stackdriver tracing. - stackdriver_project_id: str = betterproto.string_field(4) - # (optional) By default, the Stackdriver exporter will connect to production - # Stackdriver. If stackdriver_address is non-empty, it will instead connect - # to this address, which is in the gRPC format: - # https://github.com/grpc/grpc/blob/master/doc/naming.md - stackdriver_address: str = betterproto.string_field(10) - # (optional) The gRPC server that hosts Stackdriver tracing service. Only - # Google gRPC is supported. If :ref:`target_uri - # ` is - # not provided, the default production Stackdriver address will be used. - stackdriver_grpc_service: "___api_v2_core__.GrpcService" = ( - betterproto.message_field(13) - ) - # Enables the Zipkin exporter if set to true. The url and service name must - # also be set. - zipkin_exporter_enabled: bool = betterproto.bool_field(5) - # The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans" - zipkin_url: str = betterproto.string_field(6) - # Enables the OpenCensus Agent exporter if set to true. The ocagent_address - # or ocagent_grpc_service must also be set. - ocagent_exporter_enabled: bool = betterproto.bool_field(11) - # The address of the OpenCensus Agent, if its exporter is enabled, in gRPC - # format: https://github.com/grpc/grpc/blob/master/doc/naming.md - # [#comment:TODO: deprecate this field] - ocagent_address: str = betterproto.string_field(12) - # (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC - # is supported. This is only used if the ocagent_address is left empty. - ocagent_grpc_service: "___api_v2_core__.GrpcService" = betterproto.message_field(14) - # List of incoming trace context headers we will accept. First one found - # wins. - incoming_trace_context: List[ - "OpenCensusConfigTraceContext" - ] = betterproto.enum_field(8) - # List of outgoing trace context headers we will produce. - outgoing_trace_context: List[ - "OpenCensusConfigTraceContext" - ] = betterproto.enum_field(9) - - -@dataclass(eq=False, repr=False) -class LightstepConfig(betterproto.Message): - """ - Configuration for the LightStep tracer. [#extension: - envoy.tracers.lightstep] - """ - - # The cluster manager cluster that hosts the LightStep collectors. - collector_cluster: str = betterproto.string_field(1) - # File containing the access token to the `LightStep - # `_ API. - access_token_file: str = betterproto.string_field(2) - # Propagation modes to use by LightStep's tracer. - propagation_modes: List["LightstepConfigPropagationMode"] = betterproto.enum_field( - 3 - ) - - -@dataclass(eq=False, repr=False) -class DatadogConfig(betterproto.Message): - """ - Configuration for the Datadog tracer. [#extension: envoy.tracers.datadog] - """ - - # The cluster to use for submitting traces to the Datadog agent. - collector_cluster: str = betterproto.string_field(1) - # The name used for the service when traces are generated by envoy. 
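These tracer messages are generated betterproto dataclasses, so a configuration can be assembled straight from the fields above. A minimal sketch for the v2 ``LightstepConfig`` defined earlier in this file (the cluster name and token path are placeholders):

from envoy_data_plane.envoy.config.trace.v2 import (
    LightstepConfig,
    LightstepConfigPropagationMode,
)

lightstep = LightstepConfig(
    collector_cluster="lightstep",              # placeholder cluster name
    access_token_file="/etc/lightstep/token",   # placeholder token path
    propagation_modes=[
        LightstepConfigPropagationMode.ENVOY,
        LightstepConfigPropagationMode.B3,
    ],
)
print(bytes(lightstep).hex())                   # protobuf wire format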
- service_name: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class DynamicOtConfig(betterproto.Message): - """ - DynamicOtConfig is used to dynamically load a tracer from a shared library - that implements the `OpenTracing dynamic loading API - `_. [#extension: - envoy.tracers.dynamic_ot] - """ - - # Dynamic library implementing the `OpenTracing API - # `_. - library: str = betterproto.string_field(1) - # The configuration to use when creating a tracer from the given dynamic - # library. - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Tracing(betterproto.Message): - """ - The tracing configuration specifies settings for an HTTP tracer provider - used by Envoy. Envoy may support other tracers in the future, but right now - the HTTP tracer is the only one supported. .. attention:: Use of this - message type has been deprecated in favor of direct use of - :ref:`Tracing.Http `. - """ - - # Provides configuration for the HTTP tracer. - http: "TracingHttp" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class TracingHttp(betterproto.Message): - """ - Configuration for an HTTP tracer provider used by Envoy. The configuration - is defined by the :ref:`HttpConnectionManager.Tracing ` - :ref:`provider ` field. - """ - - # The name of the HTTP trace driver to instantiate. The name must match a - # supported HTTP trace driver. Built-in trace drivers: - - # *envoy.tracers.lightstep* - *envoy.tracers.zipkin* - - # *envoy.tracers.dynamic_ot* - *envoy.tracers.datadog* - - # *envoy.tracers.opencensus* - *envoy.tracers.xray* - name: str = betterproto.string_field(1) - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 2, group="config_type" - ) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.config: - warnings.warn("TracingHttp.config is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class TraceServiceConfig(betterproto.Message): - """Configuration structure.""" - - # The upstream gRPC cluster that hosts the metrics service. - grpc_service: "___api_v2_core__.GrpcService" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ZipkinConfig(betterproto.Message): - """ - Configuration for the Zipkin tracer. [#extension: envoy.tracers.zipkin] - [#next-free-field: 6] - """ - - # The cluster manager cluster that hosts the Zipkin collectors. Note that the - # Zipkin cluster must be defined in the :ref:`Bootstrap static cluster - # resources - # `. - collector_cluster: str = betterproto.string_field(1) - # The API endpoint of the Zipkin service where the spans will be sent. When - # using a standard Zipkin installation, the API endpoint is typically - # /api/v1/spans, which is the default value. - collector_endpoint: str = betterproto.string_field(2) - # Determines whether a 128bit trace id will be used when creating a new trace - # instance. The default value is false, which will result in a 64 bit trace - # id being used. - trace_id_128_bit: bool = betterproto.bool_field(3) - # Determines whether client and server spans will share the same span - # context. The default value is true. - shared_span_context: Optional[bool] = betterproto.message_field( - 4, wraps=betterproto.TYPE_BOOL - ) - # Determines the selected collector endpoint version. 
By default, the - # ``HTTP_JSON_V1`` will be used. - collector_endpoint_version: "ZipkinConfigCollectorEndpointVersion" = ( - betterproto.enum_field(5) - ) - - -from .....opencensus.proto.trace import v1 as ____opencensus_proto_trace_v1__ -from ....api.v2 import core as ___api_v2_core__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/trace/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/trace/v2alpha/__init__.py deleted file mode 100644 index 60177f7..0000000 --- a/src/envoy_data_plane/envoy/config/trace/v2alpha/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/trace/v2alpha/xray.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class XRayConfig(betterproto.Message): - # The UDP endpoint of the X-Ray Daemon where the spans will be sent. If this - # value is not set, the default value of 127.0.0.1:2000 will be used. - daemon_endpoint: "___api_v2_core__.SocketAddress" = betterproto.message_field(1) - # The name of the X-Ray segment. - segment_name: str = betterproto.string_field(2) - # The location of a local custom sampling rules JSON file. For an example of - # the sampling rules see: `X-Ray SDK documentation - # `_ - sampling_rule_manifest: "___api_v2_core__.DataSource" = betterproto.message_field(3) - - -from ....api.v2 import core as ___api_v2_core__ diff --git a/src/envoy_data_plane/envoy/config/trace/v3/__init__.py b/src/envoy_data_plane/envoy/config/trace/v3/__init__.py deleted file mode 100644 index b564061..0000000 --- a/src/envoy_data_plane/envoy/config/trace/v3/__init__.py +++ /dev/null @@ -1,313 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/trace/v3/datadog.proto, envoy/config/trace/v3/dynamic_ot.proto, envoy/config/trace/v3/http_tracer.proto, envoy/config/trace/v3/lightstep.proto, envoy/config/trace/v3/opencensus.proto, envoy/config/trace/v3/service.proto, envoy/config/trace/v3/skywalking.proto, envoy/config/trace/v3/trace.proto, envoy/config/trace/v3/xray.proto, envoy/config/trace/v3/zipkin.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class OpenCensusConfigTraceContext(betterproto.Enum): - NONE = 0 - TRACE_CONTEXT = 1 - GRPC_TRACE_BIN = 2 - CLOUD_TRACE_CONTEXT = 3 - B3 = 4 - - -class LightstepConfigPropagationMode(betterproto.Enum): - ENVOY = 0 - LIGHTSTEP = 1 - B3 = 2 - TRACE_CONTEXT = 3 - - -class ZipkinConfigCollectorEndpointVersion(betterproto.Enum): - DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 - HTTP_JSON = 1 - HTTP_PROTO = 2 - GRPC = 3 - - -@dataclass(eq=False, repr=False) -class OpenCensusConfig(betterproto.Message): - """ - Configuration for the OpenCensus tracer. [#next-free-field: 15] - [#extension: envoy.tracers.opencensus] - """ - - # Configures tracing, e.g. the sampler, max number of annotations, etc. - trace_config: "____opencensus_proto_trace_v1__.TraceConfig" = ( - betterproto.message_field(1) - ) - # Enables the stdout exporter if set to true. This is intended for debugging - # purposes. - stdout_exporter_enabled: bool = betterproto.bool_field(2) - # Enables the Stackdriver exporter if set to true. The project_id must also - # be set. 
- stackdriver_exporter_enabled: bool = betterproto.bool_field(3) - # The Cloud project_id to use for Stackdriver tracing. - stackdriver_project_id: str = betterproto.string_field(4) - # (optional) By default, the Stackdriver exporter will connect to production - # Stackdriver. If stackdriver_address is non-empty, it will instead connect - # to this address, which is in the gRPC format: - # https://github.com/grpc/grpc/blob/master/doc/naming.md - stackdriver_address: str = betterproto.string_field(10) - # (optional) The gRPC server that hosts Stackdriver tracing service. Only - # Google gRPC is supported. If :ref:`target_uri - # ` is - # not provided, the default production Stackdriver address will be used. - stackdriver_grpc_service: "__core_v3__.GrpcService" = betterproto.message_field(13) - # Enables the Zipkin exporter if set to true. The url and service name must - # also be set. This is deprecated, prefer to use Envoy's :ref:`native Zipkin - # tracer `. - zipkin_exporter_enabled: bool = betterproto.bool_field(5) - # The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans". This is - # deprecated, prefer to use Envoy's :ref:`native Zipkin tracer - # `. - zipkin_url: str = betterproto.string_field(6) - # Enables the OpenCensus Agent exporter if set to true. The ocagent_address - # or ocagent_grpc_service must also be set. - ocagent_exporter_enabled: bool = betterproto.bool_field(11) - # The address of the OpenCensus Agent, if its exporter is enabled, in gRPC - # format: https://github.com/grpc/grpc/blob/master/doc/naming.md - # [#comment:TODO: deprecate this field] - ocagent_address: str = betterproto.string_field(12) - # (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC - # is supported. This is only used if the ocagent_address is left empty. - ocagent_grpc_service: "__core_v3__.GrpcService" = betterproto.message_field(14) - # List of incoming trace context headers we will accept. First one found - # wins. - incoming_trace_context: List[ - "OpenCensusConfigTraceContext" - ] = betterproto.enum_field(8) - # List of outgoing trace context headers we will produce. - outgoing_trace_context: List[ - "OpenCensusConfigTraceContext" - ] = betterproto.enum_field(9) - - def __post_init__(self) -> None: - super().__post_init__() - if self.zipkin_exporter_enabled: - warnings.warn( - "OpenCensusConfig.zipkin_exporter_enabled is deprecated", - DeprecationWarning, - ) - if self.zipkin_url: - warnings.warn( - "OpenCensusConfig.zipkin_url is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class LightstepConfig(betterproto.Message): - """ - Configuration for the LightStep tracer. [#extension: - envoy.tracers.lightstep] - """ - - # The cluster manager cluster that hosts the LightStep collectors. - collector_cluster: str = betterproto.string_field(1) - # File containing the access token to the `LightStep - # `_ API. - access_token_file: str = betterproto.string_field(2) - # Access token to the `LightStep `_ API. - access_token: "__core_v3__.DataSource" = betterproto.message_field(4) - # Propagation modes to use by LightStep's tracer. - propagation_modes: List["LightstepConfigPropagationMode"] = betterproto.enum_field( - 3 - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.access_token_file: - warnings.warn( - "LightstepConfig.access_token_file is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class DatadogConfig(betterproto.Message): - """ - Configuration for the Datadog tracer. 
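The ``__post_init__`` hook shown above for the v3 ``LightstepConfig`` turns use of the deprecated ``access_token_file`` field into a ``DeprecationWarning`` at construction time. A small sketch of how that surfaces (field values are placeholders; the ``access_token`` DataSource is the non-deprecated alternative):

import warnings

from envoy_data_plane.envoy.config.trace.v3 import LightstepConfig

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    cfg = LightstepConfig(
        collector_cluster="lightstep",             # placeholder cluster name
        access_token_file="/etc/lightstep/token",  # deprecated field, placeholder path
    )

# Constructing with the deprecated field emits a DeprecationWarning.
assert any(issubclass(w.category, DeprecationWarning) for w in caught)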
[#extension: envoy.tracers.datadog] - """ - - # The cluster to use for submitting traces to the Datadog agent. - collector_cluster: str = betterproto.string_field(1) - # The name used for the service when traces are generated by envoy. - service_name: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class DynamicOtConfig(betterproto.Message): - """ - DynamicOtConfig is used to dynamically load a tracer from a shared library - that implements the `OpenTracing dynamic loading API - `_. [#extension: - envoy.tracers.dynamic_ot] - """ - - # Dynamic library implementing the `OpenTracing API - # `_. - library: str = betterproto.string_field(1) - # The configuration to use when creating a tracer from the given dynamic - # library. - config: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Tracing(betterproto.Message): - """ - The tracing configuration specifies settings for an HTTP tracer provider - used by Envoy. Envoy may support other tracers in the future, but right now - the HTTP tracer is the only one supported. .. attention:: Use of this - message type has been deprecated in favor of direct use of - :ref:`Tracing.Http `. - """ - - # Provides configuration for the HTTP tracer. - http: "TracingHttp" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class TracingHttp(betterproto.Message): - """ - Configuration for an HTTP tracer provider used by Envoy. The configuration - is defined by the :ref:`HttpConnectionManager.Tracing ` :ref:`provider ` field. - """ - - # The name of the HTTP trace driver to instantiate. The name must match a - # supported HTTP trace driver. See the :ref:`extensions listed in - # typed_config below ` for the default list - # of the HTTP trace driver. - name: str = betterproto.string_field(1) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - -@dataclass(eq=False, repr=False) -class TraceServiceConfig(betterproto.Message): - """Configuration structure.""" - - # The upstream gRPC cluster that hosts the metrics service. - grpc_service: "__core_v3__.GrpcService" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ZipkinConfig(betterproto.Message): - """ - Configuration for the Zipkin tracer. [#extension: envoy.tracers.zipkin] - [#next-free-field: 7] - """ - - # The cluster manager cluster that hosts the Zipkin collectors. - collector_cluster: str = betterproto.string_field(1) - # The API endpoint of the Zipkin service where the spans will be sent. When - # using a standard Zipkin installation. - collector_endpoint: str = betterproto.string_field(2) - # Determines whether a 128bit trace id will be used when creating a new trace - # instance. The default value is false, which will result in a 64 bit trace - # id being used. - trace_id_128_bit: bool = betterproto.bool_field(3) - # Determines whether client and server spans will share the same span - # context. The default value is true. - shared_span_context: Optional[bool] = betterproto.message_field( - 4, wraps=betterproto.TYPE_BOOL - ) - # Determines the selected collector endpoint version. - collector_endpoint_version: "ZipkinConfigCollectorEndpointVersion" = ( - betterproto.enum_field(5) - ) - # Optional hostname to use when sending spans to the collector_cluster. - # Useful for collectors that require a specific hostname. Defaults to - # :ref:`collector_cluster - # ` above. 
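The ``ZipkinConfig`` fields above map one-to-one onto keyword arguments of the generated dataclass. A minimal sketch that builds a v3 Zipkin tracer config and round-trips it through the protobuf wire format (cluster and endpoint values are placeholders):

from envoy_data_plane.envoy.config.trace.v3 import (
    ZipkinConfig,
    ZipkinConfigCollectorEndpointVersion,
)

zipkin = ZipkinConfig(
    collector_cluster="zipkin",                 # placeholder cluster name
    collector_endpoint="/api/v2/spans",         # placeholder span endpoint
    trace_id_128_bit=True,
    shared_span_context=False,                  # wrapped bool, plain value accepted
    collector_endpoint_version=ZipkinConfigCollectorEndpointVersion.HTTP_PROTO,
)

raw = bytes(zipkin)                             # protobuf wire format
assert ZipkinConfig().parse(raw).collector_endpoint == "/api/v2/spans"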
- collector_hostname: str = betterproto.string_field(6) - - -@dataclass(eq=False, repr=False) -class XRayConfig(betterproto.Message): - """[#extension: envoy.tracers.xray]""" - - # The UDP endpoint of the X-Ray Daemon where the spans will be sent. If this - # value is not set, the default value of 127.0.0.1:2000 will be used. - daemon_endpoint: "__core_v3__.SocketAddress" = betterproto.message_field(1) - # The name of the X-Ray segment. - segment_name: str = betterproto.string_field(2) - # The location of a local custom sampling rules JSON file. For an example of - # the sampling rules see: `X-Ray SDK documentation - # `_ - sampling_rule_manifest: "__core_v3__.DataSource" = betterproto.message_field(3) - # Optional custom fields to be added to each trace segment. see: `X-Ray - # Segment Document documentation - # `__ - segment_fields: "XRayConfigSegmentFields" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class XRayConfigSegmentFields(betterproto.Message): - # The type of AWS resource, e.g. "AWS::AppMesh::Proxy". - origin: str = betterproto.string_field(1) - # AWS resource metadata dictionary. See: `X-Ray Segment Document - # documentation `__ - aws: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class SkyWalkingConfig(betterproto.Message): - """ - Configuration for the SkyWalking tracer. Please note that if SkyWalking - tracer is used as the provider of http tracer, then :ref:`start_child_span - ` in the router must be set to true to get the correct topology and - tracing data. Moreover, SkyWalking Tracer does not support SkyWalking - extension header (``sw8-x``) temporarily. [#extension: - envoy.tracers.skywalking] - """ - - # SkyWalking collector service. - grpc_service: "__core_v3__.GrpcService" = betterproto.message_field(1) - client_config: "ClientConfig" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ClientConfig(betterproto.Message): - """Client config for SkyWalking tracer.""" - - # Service name for SkyWalking tracer. If this field is empty, then local - # service cluster name that configured by :ref:`Bootstrap node - # ` message's - # :ref:`cluster ` field or - # command line option :option:`--service-cluster` will be used. If both this - # field and local service cluster name are empty, ``EnvoyProxy`` is used as - # the service name by default. - service_name: str = betterproto.string_field(1) - # Service instance name for SkyWalking tracer. If this field is empty, then - # local service node that configured by :ref:`Bootstrap node - # ` message's :ref:`id - # ` field or command line option - # :option:`--service-node` will be used. If both this field and local service - # node are empty, ``EnvoyProxy`` is used as the instance name by default. - instance_name: str = betterproto.string_field(2) - # Inline authentication token string. - backend_token: str = betterproto.string_field(3, group="backend_token_specifier") - # Envoy caches the segment in memory when the SkyWalking backend service is - # temporarily unavailable. This field specifies the maximum number of - # segments that can be cached. If not specified, the default is 1024. 
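The SkyWalking tracer splits its settings between ``SkyWalkingConfig`` and the ``ClientConfig`` message described above. A hedged sketch that fills in the client identity fields (the service and instance names are placeholders; the ``grpc_service`` collector reference is omitted because its nested ``GrpcService`` fields live in ``envoy.config.core.v3`` rather than in this hunk):

from envoy_data_plane.envoy.config.trace.v3 import ClientConfig, SkyWalkingConfig

client = ClientConfig(
    service_name="edge-proxy",        # placeholder; empty would fall back to the Bootstrap cluster name
    instance_name="edge-proxy-0",     # placeholder; empty would fall back to the Bootstrap node id
    backend_token="example-token",    # placeholder inline token (backend_token_specifier oneof)
)

sw = SkyWalkingConfig(client_config=client)   # grpc_service left unset in this sketch
print(sw.to_json())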
- max_cache_size: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - - -from .....opencensus.proto.trace import v1 as ____opencensus_proto_trace_v1__ -from ...core import v3 as __core_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/config/transport_socket/__init__.py b/src/envoy_data_plane/envoy/config/transport_socket/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/transport_socket/alts/__init__.py b/src/envoy_data_plane/envoy/config/transport_socket/alts/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/transport_socket/alts/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/transport_socket/alts/v2alpha/__init__.py deleted file mode 100644 index 8696412..0000000 --- a/src/envoy_data_plane/envoy/config/transport_socket/alts/v2alpha/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/transport_socket/alts/v2alpha/alts.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Alts(betterproto.Message): - """ - Configuration for ALTS transport socket. This provides Google's ALTS - protocol to Envoy. https://cloud.google.com/security/encryption-in- - transit/application-layer-transport-security/ - """ - - # The location of a handshaker service, this is usually 169.254.169.254:8080 - # on GCE. - handshaker_service: str = betterproto.string_field(1) - # The acceptable service accounts from peer, peers not in the list will be - # rejected in the handshake validation step. If empty, no validation will be - # performed. - peer_service_accounts: List[str] = betterproto.string_field(2) diff --git a/src/envoy_data_plane/envoy/config/transport_socket/raw_buffer/__init__.py b/src/envoy_data_plane/envoy/config/transport_socket/raw_buffer/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/transport_socket/raw_buffer/v2/__init__.py b/src/envoy_data_plane/envoy/config/transport_socket/raw_buffer/v2/__init__.py deleted file mode 100644 index 240ace9..0000000 --- a/src/envoy_data_plane/envoy/config/transport_socket/raw_buffer/v2/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RawBuffer(betterproto.Message): - """Configuration for raw buffer transport socket.""" - - pass diff --git a/src/envoy_data_plane/envoy/config/transport_socket/tap/__init__.py b/src/envoy_data_plane/envoy/config/transport_socket/tap/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/config/transport_socket/tap/v2alpha/__init__.py b/src/envoy_data_plane/envoy/config/transport_socket/tap/v2alpha/__init__.py deleted file mode 100644 index e29b803..0000000 --- a/src/envoy_data_plane/envoy/config/transport_socket/tap/v2alpha/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/config/transport_socket/tap/v2alpha/tap.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Tap(betterproto.Message): - """ - Configuration for tap transport socket. This wraps another transport - socket, providing the ability to interpose and record in plain text any - traffic that is surfaced to Envoy. - """ - - # Common configuration for the tap transport socket. - common_config: "___common_tap_v2_alpha__.CommonExtensionConfig" = ( - betterproto.message_field(1) - ) - # The underlying transport socket being wrapped. - transport_socket: "____api_v2_core__.TransportSocket" = betterproto.message_field(2) - - -from .....api.v2 import core as ____api_v2_core__ -from ....common.tap import v2alpha as ___common_tap_v2_alpha__ diff --git a/src/envoy_data_plane/envoy/data/__init__.py b/src/envoy_data_plane/envoy/data/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/data/accesslog/__init__.py b/src/envoy_data_plane/envoy/data/accesslog/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/data/accesslog/v2/__init__.py b/src/envoy_data_plane/envoy/data/accesslog/v2/__init__.py deleted file mode 100644 index 98f9f9e..0000000 --- a/src/envoy_data_plane/envoy/data/accesslog/v2/__init__.py +++ /dev/null @@ -1,332 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/data/accesslog/v2/accesslog.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import datetime, timedelta -from typing import Dict, List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class HttpAccessLogEntryHttpVersion(betterproto.Enum): - PROTOCOL_UNSPECIFIED = 0 - HTTP10 = 1 - HTTP11 = 2 - HTTP2 = 3 - HTTP3 = 4 - - -class ResponseFlagsUnauthorizedReason(betterproto.Enum): - REASON_UNSPECIFIED = 0 - EXTERNAL_SERVICE = 1 - - -class TlsPropertiesTlsVersion(betterproto.Enum): - VERSION_UNSPECIFIED = 0 - TLSv1 = 1 - TLSv1_1 = 2 - TLSv1_2 = 3 - TLSv1_3 = 4 - - -@dataclass(eq=False, repr=False) -class TcpAccessLogEntry(betterproto.Message): - # Common properties shared by all Envoy access logs. - common_properties: "AccessLogCommon" = betterproto.message_field(1) - # Properties of the TCP connection. - connection_properties: "ConnectionProperties" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class HttpAccessLogEntry(betterproto.Message): - # Common properties shared by all Envoy access logs. - common_properties: "AccessLogCommon" = betterproto.message_field(1) - protocol_version: "HttpAccessLogEntryHttpVersion" = betterproto.enum_field(2) - # Description of the incoming HTTP request. - request: "HttpRequestProperties" = betterproto.message_field(3) - # Description of the outgoing HTTP response. - response: "HttpResponseProperties" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class ConnectionProperties(betterproto.Message): - """Defines fields for a connection""" - - # Number of bytes received from downstream. - received_bytes: int = betterproto.uint64_field(1) - # Number of bytes sent to downstream. - sent_bytes: int = betterproto.uint64_field(2) - - -@dataclass(eq=False, repr=False) -class AccessLogCommon(betterproto.Message): - """ - Defines fields that are shared by all Envoy access logs. 
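The access log entry messages in this module are likewise plain betterproto dataclasses, so the TCP entry defined above can be built and decoded without touching Envoy itself. A minimal sketch with made-up byte counts:

from envoy_data_plane.envoy.data.accesslog.v2 import (
    ConnectionProperties,
    TcpAccessLogEntry,
)

entry = TcpAccessLogEntry(
    connection_properties=ConnectionProperties(
        received_bytes=1024,   # made-up downstream byte count
        sent_bytes=2048,       # made-up downstream byte count
    ),
)

raw = bytes(entry)                        # protobuf wire format
decoded = TcpAccessLogEntry().parse(raw)
assert decoded.connection_properties.sent_bytes == 2048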
[#next-free-field: - 22] - """ - - # [#not-implemented-hide:] This field indicates the rate at which this log - # entry was sampled. Valid range is (0.0, 1.0]. - sample_rate: float = betterproto.double_field(1) - # This field is the remote/origin address on which the request from the user - # was received. Note: This may not be the physical peer. E.g, if the remote - # address is inferred from for example the x-forwarder-for header, proxy - # protocol, etc. - downstream_remote_address: "___api_v2_core__.Address" = betterproto.message_field(2) - # This field is the local/destination address on which the request from the - # user was received. - downstream_local_address: "___api_v2_core__.Address" = betterproto.message_field(3) - # If the connection is secure,S this field will contain TLS properties. - tls_properties: "TlsProperties" = betterproto.message_field(4) - # The time that Envoy started servicing this request. This is effectively the - # time that the first downstream byte is received. - start_time: datetime = betterproto.message_field(5) - # Interval between the first downstream byte received and the last downstream - # byte received (i.e. time it takes to receive a request). - time_to_last_rx_byte: timedelta = betterproto.message_field(6) - # Interval between the first downstream byte received and the first upstream - # byte sent. There may by considerable delta between *time_to_last_rx_byte* - # and this value due to filters. Additionally, the same caveats apply as - # documented in *time_to_last_downstream_tx_byte* about not accounting for - # kernel socket buffer time, etc. - time_to_first_upstream_tx_byte: timedelta = betterproto.message_field(7) - # Interval between the first downstream byte received and the last upstream - # byte sent. There may by considerable delta between *time_to_last_rx_byte* - # and this value due to filters. Additionally, the same caveats apply as - # documented in *time_to_last_downstream_tx_byte* about not accounting for - # kernel socket buffer time, etc. - time_to_last_upstream_tx_byte: timedelta = betterproto.message_field(8) - # Interval between the first downstream byte received and the first upstream - # byte received (i.e. time it takes to start receiving a response). - time_to_first_upstream_rx_byte: timedelta = betterproto.message_field(9) - # Interval between the first downstream byte received and the last upstream - # byte received (i.e. time it takes to receive a complete response). - time_to_last_upstream_rx_byte: timedelta = betterproto.message_field(10) - # Interval between the first downstream byte received and the first - # downstream byte sent. There may be a considerable delta between the - # *time_to_first_upstream_rx_byte* and this field due to filters. - # Additionally, the same caveats apply as documented in - # *time_to_last_downstream_tx_byte* about not accounting for kernel socket - # buffer time, etc. - time_to_first_downstream_tx_byte: timedelta = betterproto.message_field(11) - # Interval between the first downstream byte received and the last downstream - # byte sent. Depending on protocol, buffering, windowing, filters, etc. there - # may be a considerable delta between *time_to_last_upstream_rx_byte* and - # this field. Note also that this is an approximate time. In the current - # implementation it does not include kernel socket buffer time. In the - # current implementation it also does not include send window buffering - # inside the HTTP/2 codec. 
In the future it is likely that work will be done - # to make this duration more accurate. - time_to_last_downstream_tx_byte: timedelta = betterproto.message_field(12) - # The upstream remote/destination address that handles this exchange. This - # does not include retries. - upstream_remote_address: "___api_v2_core__.Address" = betterproto.message_field(13) - # The upstream local/origin address that handles this exchange. This does not - # include retries. - upstream_local_address: "___api_v2_core__.Address" = betterproto.message_field(14) - # The upstream cluster that *upstream_remote_address* belongs to. - upstream_cluster: str = betterproto.string_field(15) - # Flags indicating occurrences during request/response processing. - response_flags: "ResponseFlags" = betterproto.message_field(16) - # All metadata encountered during request processing, including endpoint - # selection. This can be used to associate IDs attached to the various - # configurations used to process this request with the access log entry. For - # example, a route created from a higher level forwarding rule with some ID - # can place that ID in this field and cross reference later. It can also be - # used to determine if a canary endpoint was used or not. - metadata: "___api_v2_core__.Metadata" = betterproto.message_field(17) - # If upstream connection failed due to transport socket (e.g. TLS handshake), - # provides the failure reason from the transport socket. The format of this - # field depends on the configured upstream transport socket. Common TLS - # failures are in :ref:`TLS trouble shooting - # `. - upstream_transport_failure_reason: str = betterproto.string_field(18) - # The name of the route - route_name: str = betterproto.string_field(19) - # This field is the downstream direct remote address on which the request - # from the user was received. Note: This is always the physical peer, even if - # the remote address is inferred from for example the x-forwarder-for header, - # proxy protocol, etc. - downstream_direct_remote_address: "___api_v2_core__.Address" = ( - betterproto.message_field(20) - ) - # Map of filter state in stream info that have been configured to be logged. - # If the filter state serialized to any message other than - # `google.protobuf.Any` it will be packed into `google.protobuf.Any`. - filter_state_objects: Dict[ - str, "betterproto_lib_google_protobuf.Any" - ] = betterproto.map_field(21, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - - -@dataclass(eq=False, repr=False) -class ResponseFlags(betterproto.Message): - """ - Flags indicating occurrences during request/response processing. [#next- - free-field: 20] - """ - - # Indicates local server healthcheck failed. - failed_local_healthcheck: bool = betterproto.bool_field(1) - # Indicates there was no healthy upstream. - no_healthy_upstream: bool = betterproto.bool_field(2) - # Indicates an there was an upstream request timeout. - upstream_request_timeout: bool = betterproto.bool_field(3) - # Indicates local codec level reset was sent on the stream. - local_reset: bool = betterproto.bool_field(4) - # Indicates remote codec level reset was received on the stream. - upstream_remote_reset: bool = betterproto.bool_field(5) - # Indicates there was a local reset by a connection pool due to an initial - # connection failure. - upstream_connection_failure: bool = betterproto.bool_field(6) - # Indicates the stream was reset due to an upstream connection termination. 
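``ResponseFlags`` is mostly a bag of booleans, which makes it easy to summarise which conditions Envoy reported for a request. A hedged sketch of a helper that lists the set flags by introspecting the dataclass fields, skipping any non-boolean field such as the nested unauthorized-details message defined below:

import dataclasses
from typing import List

from envoy_data_plane.envoy.data.accesslog.v2 import ResponseFlags

def set_response_flags(flags: ResponseFlags) -> List[str]:
    """Return the names of all boolean flags set on this entry."""
    return [
        field.name
        for field in dataclasses.fields(flags)
        if isinstance(getattr(flags, field.name), bool) and getattr(flags, field.name)
    ]

flags = ResponseFlags(no_healthy_upstream=True, upstream_request_timeout=True)
print(set_response_flags(flags))  # ['no_healthy_upstream', 'upstream_request_timeout']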
- upstream_connection_termination: bool = betterproto.bool_field(7) - # Indicates the stream was reset because of a resource overflow. - upstream_overflow: bool = betterproto.bool_field(8) - # Indicates no route was found for the request. - no_route_found: bool = betterproto.bool_field(9) - # Indicates that the request was delayed before proxying. - delay_injected: bool = betterproto.bool_field(10) - # Indicates that the request was aborted with an injected error code. - fault_injected: bool = betterproto.bool_field(11) - # Indicates that the request was rate-limited locally. - rate_limited: bool = betterproto.bool_field(12) - # Indicates if the request was deemed unauthorized and the reason for it. - unauthorized_details: "ResponseFlagsUnauthorized" = betterproto.message_field(13) - # Indicates that the request was rejected because there was an error in rate - # limit service. - rate_limit_service_error: bool = betterproto.bool_field(14) - # Indicates the stream was reset due to a downstream connection termination. - downstream_connection_termination: bool = betterproto.bool_field(15) - # Indicates that the upstream retry limit was exceeded, resulting in a - # downstream error. - upstream_retry_limit_exceeded: bool = betterproto.bool_field(16) - # Indicates that the stream idle timeout was hit, resulting in a downstream - # 408. - stream_idle_timeout: bool = betterproto.bool_field(17) - # Indicates that the request was rejected because an envoy request header - # failed strict validation. - invalid_envoy_request_headers: bool = betterproto.bool_field(18) - # Indicates there was an HTTP protocol error on the downstream request. - downstream_protocol_error: bool = betterproto.bool_field(19) - - -@dataclass(eq=False, repr=False) -class ResponseFlagsUnauthorized(betterproto.Message): - reason: "ResponseFlagsUnauthorizedReason" = betterproto.enum_field(1) - - -@dataclass(eq=False, repr=False) -class TlsProperties(betterproto.Message): - """Properties of a negotiated TLS connection. [#next-free-field: 7]""" - - # Version of TLS that was negotiated. - tls_version: "TlsPropertiesTlsVersion" = betterproto.enum_field(1) - # TLS cipher suite negotiated during handshake. The value is a four-digit hex - # code defined by the IANA TLS Cipher Suite Registry (e.g. ``009C`` for - # ``TLS_RSA_WITH_AES_128_GCM_SHA256``). Here it is expressed as an integer. - tls_cipher_suite: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # SNI hostname from handshake. - tls_sni_hostname: str = betterproto.string_field(3) - # Properties of the local certificate used to negotiate TLS. - local_certificate_properties: "TlsPropertiesCertificateProperties" = ( - betterproto.message_field(4) - ) - # Properties of the peer certificate used to negotiate TLS. - peer_certificate_properties: "TlsPropertiesCertificateProperties" = ( - betterproto.message_field(5) - ) - # The TLS session ID. - tls_session_id: str = betterproto.string_field(6) - - -@dataclass(eq=False, repr=False) -class TlsPropertiesCertificateProperties(betterproto.Message): - # SANs present in the certificate. - subject_alt_name: List[ - "TlsPropertiesCertificatePropertiesSubjectAltName" - ] = betterproto.message_field(1) - # The subject field of the certificate. 
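The TLS block above records the negotiated parameters of a connection, with ``tls_cipher_suite`` carried as the integer form of the IANA code. A small sketch with illustrative values (``0x009C`` is the registry code for ``TLS_RSA_WITH_AES_128_GCM_SHA256`` quoted in the comment above):

from envoy_data_plane.envoy.data.accesslog.v2 import (
    TlsProperties,
    TlsPropertiesTlsVersion,
)

tls = TlsProperties(
    tls_version=TlsPropertiesTlsVersion.TLSv1_3,
    tls_cipher_suite=0x009C,            # wrapped uint32; plain int accepted
    tls_sni_hostname="example.com",     # illustrative SNI value
)
print(tls.to_dict())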
- subject: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class TlsPropertiesCertificatePropertiesSubjectAltName(betterproto.Message): - uri: str = betterproto.string_field(1, group="san") - # [#not-implemented-hide:] - dns: str = betterproto.string_field(2, group="san") - - -@dataclass(eq=False, repr=False) -class HttpRequestProperties(betterproto.Message): - """[#next-free-field: 14]""" - - # The request method (RFC 7231/2616). - request_method: "___api_v2_core__.RequestMethod" = betterproto.enum_field(1) - # The scheme portion of the incoming request URI. - scheme: str = betterproto.string_field(2) - # HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value. - authority: str = betterproto.string_field(3) - # The port of the incoming request URI (unused currently, as port is composed - # onto authority). - port: Optional[int] = betterproto.message_field(4, wraps=betterproto.TYPE_UINT32) - # The path portion from the incoming request URI. - path: str = betterproto.string_field(5) - # Value of the ``User-Agent`` request header. - user_agent: str = betterproto.string_field(6) - # Value of the ``Referer`` request header. - referer: str = betterproto.string_field(7) - # Value of the ``X-Forwarded-For`` request header. - forwarded_for: str = betterproto.string_field(8) - # Value of the ``X-Request-Id`` request header This header is used by Envoy - # to uniquely identify a request. It will be generated for all external - # requests and internal requests that do not already have a request ID. - request_id: str = betterproto.string_field(9) - # Value of the ``X-Envoy-Original-Path`` request header. - original_path: str = betterproto.string_field(10) - # Size of the HTTP request headers in bytes. This value is captured from the - # OSI layer 7 perspective, i.e. it does not include overhead from framing or - # encoding at other networking layers. - request_headers_bytes: int = betterproto.uint64_field(11) - # Size of the HTTP request body in bytes. This value is captured from the OSI - # layer 7 perspective, i.e. it does not include overhead from framing or - # encoding at other networking layers. - request_body_bytes: int = betterproto.uint64_field(12) - # Map of additional headers that have been configured to be logged. - request_headers: Dict[str, str] = betterproto.map_field( - 13, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - - -@dataclass(eq=False, repr=False) -class HttpResponseProperties(betterproto.Message): - """[#next-free-field: 7]""" - - # The HTTP response code returned by Envoy. - response_code: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # Size of the HTTP response headers in bytes. This value is captured from the - # OSI layer 7 perspective, i.e. it does not include overhead from framing or - # encoding at other networking layers. - response_headers_bytes: int = betterproto.uint64_field(2) - # Size of the HTTP response body in bytes. This value is captured from the - # OSI layer 7 perspective, i.e. it does not include overhead from framing or - # encoding at other networking layers. - response_body_bytes: int = betterproto.uint64_field(3) - # Map of additional headers configured to be logged. - response_headers: Dict[str, str] = betterproto.map_field( - 4, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - # Map of trailers configured to be logged. - response_trailers: Dict[str, str] = betterproto.map_field( - 5, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - # The HTTP response code details. 
- response_code_details: str = betterproto.string_field(6) - - -from ....api.v2 import core as ___api_v2_core__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/data/accesslog/v3/__init__.py b/src/envoy_data_plane/envoy/data/accesslog/v3/__init__.py deleted file mode 100644 index 505d8f4..0000000 --- a/src/envoy_data_plane/envoy/data/accesslog/v3/__init__.py +++ /dev/null @@ -1,362 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/data/accesslog/v3/accesslog.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import datetime, timedelta -from typing import Dict, List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class HttpAccessLogEntryHttpVersion(betterproto.Enum): - PROTOCOL_UNSPECIFIED = 0 - HTTP10 = 1 - HTTP11 = 2 - HTTP2 = 3 - HTTP3 = 4 - - -class ResponseFlagsUnauthorizedReason(betterproto.Enum): - REASON_UNSPECIFIED = 0 - EXTERNAL_SERVICE = 1 - - -class TlsPropertiesTlsVersion(betterproto.Enum): - VERSION_UNSPECIFIED = 0 - TLSv1 = 1 - TLSv1_1 = 2 - TLSv1_2 = 3 - TLSv1_3 = 4 - - -@dataclass(eq=False, repr=False) -class TcpAccessLogEntry(betterproto.Message): - # Common properties shared by all Envoy access logs. - common_properties: "AccessLogCommon" = betterproto.message_field(1) - # Properties of the TCP connection. - connection_properties: "ConnectionProperties" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class HttpAccessLogEntry(betterproto.Message): - # Common properties shared by all Envoy access logs. - common_properties: "AccessLogCommon" = betterproto.message_field(1) - protocol_version: "HttpAccessLogEntryHttpVersion" = betterproto.enum_field(2) - # Description of the incoming HTTP request. - request: "HttpRequestProperties" = betterproto.message_field(3) - # Description of the outgoing HTTP response. - response: "HttpResponseProperties" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class ConnectionProperties(betterproto.Message): - """Defines fields for a connection""" - - # Number of bytes received from downstream. - received_bytes: int = betterproto.uint64_field(1) - # Number of bytes sent to downstream. - sent_bytes: int = betterproto.uint64_field(2) - - -@dataclass(eq=False, repr=False) -class AccessLogCommon(betterproto.Message): - """ - Defines fields that are shared by all Envoy access logs. [#next-free-field: - 23] - """ - - # [#not-implemented-hide:] This field indicates the rate at which this log - # entry was sampled. Valid range is (0.0, 1.0]. - sample_rate: float = betterproto.double_field(1) - # This field is the remote/origin address on which the request from the user - # was received. Note: This may not be the physical peer. E.g, if the remote - # address is inferred from for example the x-forwarder-for header, proxy - # protocol, etc. - downstream_remote_address: "___config_core_v3__.Address" = ( - betterproto.message_field(2) - ) - # This field is the local/destination address on which the request from the - # user was received. - downstream_local_address: "___config_core_v3__.Address" = betterproto.message_field( - 3 - ) - # If the connection is secure,S this field will contain TLS properties. - tls_properties: "TlsProperties" = betterproto.message_field(4) - # The time that Envoy started servicing this request. This is effectively the - # time that the first downstream byte is received. 
- start_time: datetime = betterproto.message_field(5) - # Interval between the first downstream byte received and the last downstream - # byte received (i.e. time it takes to receive a request). - time_to_last_rx_byte: timedelta = betterproto.message_field(6) - # Interval between the first downstream byte received and the first upstream - # byte sent. There may by considerable delta between *time_to_last_rx_byte* - # and this value due to filters. Additionally, the same caveats apply as - # documented in *time_to_last_downstream_tx_byte* about not accounting for - # kernel socket buffer time, etc. - time_to_first_upstream_tx_byte: timedelta = betterproto.message_field(7) - # Interval between the first downstream byte received and the last upstream - # byte sent. There may by considerable delta between *time_to_last_rx_byte* - # and this value due to filters. Additionally, the same caveats apply as - # documented in *time_to_last_downstream_tx_byte* about not accounting for - # kernel socket buffer time, etc. - time_to_last_upstream_tx_byte: timedelta = betterproto.message_field(8) - # Interval between the first downstream byte received and the first upstream - # byte received (i.e. time it takes to start receiving a response). - time_to_first_upstream_rx_byte: timedelta = betterproto.message_field(9) - # Interval between the first downstream byte received and the last upstream - # byte received (i.e. time it takes to receive a complete response). - time_to_last_upstream_rx_byte: timedelta = betterproto.message_field(10) - # Interval between the first downstream byte received and the first - # downstream byte sent. There may be a considerable delta between the - # *time_to_first_upstream_rx_byte* and this field due to filters. - # Additionally, the same caveats apply as documented in - # *time_to_last_downstream_tx_byte* about not accounting for kernel socket - # buffer time, etc. - time_to_first_downstream_tx_byte: timedelta = betterproto.message_field(11) - # Interval between the first downstream byte received and the last downstream - # byte sent. Depending on protocol, buffering, windowing, filters, etc. there - # may be a considerable delta between *time_to_last_upstream_rx_byte* and - # this field. Note also that this is an approximate time. In the current - # implementation it does not include kernel socket buffer time. In the - # current implementation it also does not include send window buffering - # inside the HTTP/2 codec. In the future it is likely that work will be done - # to make this duration more accurate. - time_to_last_downstream_tx_byte: timedelta = betterproto.message_field(12) - # The upstream remote/destination address that handles this exchange. This - # does not include retries. - upstream_remote_address: "___config_core_v3__.Address" = betterproto.message_field( - 13 - ) - # The upstream local/origin address that handles this exchange. This does not - # include retries. - upstream_local_address: "___config_core_v3__.Address" = betterproto.message_field( - 14 - ) - # The upstream cluster that *upstream_remote_address* belongs to. - upstream_cluster: str = betterproto.string_field(15) - # Flags indicating occurrences during request/response processing. - response_flags: "ResponseFlags" = betterproto.message_field(16) - # All metadata encountered during request processing, including endpoint - # selection. This can be used to associate IDs attached to the various - # configurations used to process this request with the access log entry. 
For - # example, a route created from a higher level forwarding rule with some ID - # can place that ID in this field and cross reference later. It can also be - # used to determine if a canary endpoint was used or not. - metadata: "___config_core_v3__.Metadata" = betterproto.message_field(17) - # If upstream connection failed due to transport socket (e.g. TLS handshake), - # provides the failure reason from the transport socket. The format of this - # field depends on the configured upstream transport socket. Common TLS - # failures are in :ref:`TLS trouble shooting - # `. - upstream_transport_failure_reason: str = betterproto.string_field(18) - # The name of the route - route_name: str = betterproto.string_field(19) - # This field is the downstream direct remote address on which the request - # from the user was received. Note: This is always the physical peer, even if - # the remote address is inferred from for example the x-forwarder-for header, - # proxy protocol, etc. - downstream_direct_remote_address: "___config_core_v3__.Address" = ( - betterproto.message_field(20) - ) - # Map of filter state in stream info that have been configured to be logged. - # If the filter state serialized to any message other than - # `google.protobuf.Any` it will be packed into `google.protobuf.Any`. - filter_state_objects: Dict[ - str, "betterproto_lib_google_protobuf.Any" - ] = betterproto.map_field(21, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # A list of custom tags, which annotate logs with additional information. To - # configure this value, users should configure :ref:`custom_tags `. - custom_tags: Dict[str, str] = betterproto.map_field( - 22, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - - -@dataclass(eq=False, repr=False) -class ResponseFlags(betterproto.Message): - """ - Flags indicating occurrences during request/response processing. [#next- - free-field: 27] - """ - - # Indicates local server healthcheck failed. - failed_local_healthcheck: bool = betterproto.bool_field(1) - # Indicates there was no healthy upstream. - no_healthy_upstream: bool = betterproto.bool_field(2) - # Indicates an there was an upstream request timeout. - upstream_request_timeout: bool = betterproto.bool_field(3) - # Indicates local codec level reset was sent on the stream. - local_reset: bool = betterproto.bool_field(4) - # Indicates remote codec level reset was received on the stream. - upstream_remote_reset: bool = betterproto.bool_field(5) - # Indicates there was a local reset by a connection pool due to an initial - # connection failure. - upstream_connection_failure: bool = betterproto.bool_field(6) - # Indicates the stream was reset due to an upstream connection termination. - upstream_connection_termination: bool = betterproto.bool_field(7) - # Indicates the stream was reset because of a resource overflow. - upstream_overflow: bool = betterproto.bool_field(8) - # Indicates no route was found for the request. - no_route_found: bool = betterproto.bool_field(9) - # Indicates that the request was delayed before proxying. - delay_injected: bool = betterproto.bool_field(10) - # Indicates that the request was aborted with an injected error code. - fault_injected: bool = betterproto.bool_field(11) - # Indicates that the request was rate-limited locally. - rate_limited: bool = betterproto.bool_field(12) - # Indicates if the request was deemed unauthorized and the reason for it. 
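Relative to the v2 message, ``AccessLogCommon`` in v3 adds the ``custom_tags`` map shown above, alongside timing fields that betterproto maps to ``datetime.timedelta``. A minimal sketch with made-up values:

from datetime import timedelta

from envoy_data_plane.envoy.data.accesslog.v3 import AccessLogCommon

common = AccessLogCommon(
    sample_rate=1.0,
    upstream_cluster="backend",                                   # made-up cluster name
    route_name="default_route",                                   # made-up route name
    time_to_last_downstream_tx_byte=timedelta(milliseconds=42),   # made-up latency
    custom_tags={"team": "edge", "env": "staging"},               # made-up tags
)
print(bytes(common).hex())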
- unauthorized_details: "ResponseFlagsUnauthorized" = betterproto.message_field(13) - # Indicates that the request was rejected because there was an error in rate - # limit service. - rate_limit_service_error: bool = betterproto.bool_field(14) - # Indicates the stream was reset due to a downstream connection termination. - downstream_connection_termination: bool = betterproto.bool_field(15) - # Indicates that the upstream retry limit was exceeded, resulting in a - # downstream error. - upstream_retry_limit_exceeded: bool = betterproto.bool_field(16) - # Indicates that the stream idle timeout was hit, resulting in a downstream - # 408. - stream_idle_timeout: bool = betterproto.bool_field(17) - # Indicates that the request was rejected because an envoy request header - # failed strict validation. - invalid_envoy_request_headers: bool = betterproto.bool_field(18) - # Indicates there was an HTTP protocol error on the downstream request. - downstream_protocol_error: bool = betterproto.bool_field(19) - # Indicates there was a max stream duration reached on the upstream request. - upstream_max_stream_duration_reached: bool = betterproto.bool_field(20) - # Indicates the response was served from a cache filter. - response_from_cache_filter: bool = betterproto.bool_field(21) - # Indicates that a filter configuration is not available. - no_filter_config_found: bool = betterproto.bool_field(22) - # Indicates that request or connection exceeded the downstream connection - # duration. - duration_timeout: bool = betterproto.bool_field(23) - # Indicates there was an HTTP protocol error in the upstream response. - upstream_protocol_error: bool = betterproto.bool_field(24) - # Indicates no cluster was found for the request. - no_cluster_found: bool = betterproto.bool_field(25) - # Indicates overload manager terminated the request. - overload_manager: bool = betterproto.bool_field(26) - - -@dataclass(eq=False, repr=False) -class ResponseFlagsUnauthorized(betterproto.Message): - reason: "ResponseFlagsUnauthorizedReason" = betterproto.enum_field(1) - - -@dataclass(eq=False, repr=False) -class TlsProperties(betterproto.Message): - """Properties of a negotiated TLS connection. [#next-free-field: 7]""" - - # Version of TLS that was negotiated. - tls_version: "TlsPropertiesTlsVersion" = betterproto.enum_field(1) - # TLS cipher suite negotiated during handshake. The value is a four-digit hex - # code defined by the IANA TLS Cipher Suite Registry (e.g. ``009C`` for - # ``TLS_RSA_WITH_AES_128_GCM_SHA256``). Here it is expressed as an integer. - tls_cipher_suite: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # SNI hostname from handshake. - tls_sni_hostname: str = betterproto.string_field(3) - # Properties of the local certificate used to negotiate TLS. - local_certificate_properties: "TlsPropertiesCertificateProperties" = ( - betterproto.message_field(4) - ) - # Properties of the peer certificate used to negotiate TLS. - peer_certificate_properties: "TlsPropertiesCertificateProperties" = ( - betterproto.message_field(5) - ) - # The TLS session ID. - tls_session_id: str = betterproto.string_field(6) - - -@dataclass(eq=False, repr=False) -class TlsPropertiesCertificateProperties(betterproto.Message): - # SANs present in the certificate. - subject_alt_name: List[ - "TlsPropertiesCertificatePropertiesSubjectAltName" - ] = betterproto.message_field(1) - # The subject field of the certificate. 
- subject: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class TlsPropertiesCertificatePropertiesSubjectAltName(betterproto.Message): - uri: str = betterproto.string_field(1, group="san") - # [#not-implemented-hide:] - dns: str = betterproto.string_field(2, group="san") - - -@dataclass(eq=False, repr=False) -class HttpRequestProperties(betterproto.Message): - """[#next-free-field: 14]""" - - # The request method (RFC 7231/2616). - request_method: "___config_core_v3__.RequestMethod" = betterproto.enum_field(1) - # The scheme portion of the incoming request URI. - scheme: str = betterproto.string_field(2) - # HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value. - authority: str = betterproto.string_field(3) - # The port of the incoming request URI (unused currently, as port is composed - # onto authority). - port: Optional[int] = betterproto.message_field(4, wraps=betterproto.TYPE_UINT32) - # The path portion from the incoming request URI. - path: str = betterproto.string_field(5) - # Value of the ``User-Agent`` request header. - user_agent: str = betterproto.string_field(6) - # Value of the ``Referer`` request header. - referer: str = betterproto.string_field(7) - # Value of the ``X-Forwarded-For`` request header. - forwarded_for: str = betterproto.string_field(8) - # Value of the ``X-Request-Id`` request header This header is used by Envoy - # to uniquely identify a request. It will be generated for all external - # requests and internal requests that do not already have a request ID. - request_id: str = betterproto.string_field(9) - # Value of the ``X-Envoy-Original-Path`` request header. - original_path: str = betterproto.string_field(10) - # Size of the HTTP request headers in bytes. This value is captured from the - # OSI layer 7 perspective, i.e. it does not include overhead from framing or - # encoding at other networking layers. - request_headers_bytes: int = betterproto.uint64_field(11) - # Size of the HTTP request body in bytes. This value is captured from the OSI - # layer 7 perspective, i.e. it does not include overhead from framing or - # encoding at other networking layers. - request_body_bytes: int = betterproto.uint64_field(12) - # Map of additional headers that have been configured to be logged. - request_headers: Dict[str, str] = betterproto.map_field( - 13, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - - -@dataclass(eq=False, repr=False) -class HttpResponseProperties(betterproto.Message): - """[#next-free-field: 7]""" - - # The HTTP response code returned by Envoy. - response_code: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # Size of the HTTP response headers in bytes. This value is captured from the - # OSI layer 7 perspective, i.e. it does not include overhead from framing or - # encoding at other networking layers. - response_headers_bytes: int = betterproto.uint64_field(2) - # Size of the HTTP response body in bytes. This value is captured from the - # OSI layer 7 perspective, i.e. it does not include overhead from framing or - # encoding at other networking layers. - response_body_bytes: int = betterproto.uint64_field(3) - # Map of additional headers configured to be logged. - response_headers: Dict[str, str] = betterproto.map_field( - 4, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - # Map of trailers configured to be logged. - response_trailers: Dict[str, str] = betterproto.map_field( - 5, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - # The HTTP response code details. 
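The request and response property messages above are convenient to inspect as plain dictionaries or JSON once decoded. A hedged sketch that builds a pair of them with placeholder values and dumps the forms betterproto produces:

from envoy_data_plane.envoy.data.accesslog.v3 import (
    HttpRequestProperties,
    HttpResponseProperties,
)

request = HttpRequestProperties(
    authority="example.com",            # placeholder :authority / Host value
    path="/healthz",                    # placeholder path
    user_agent="curl/7.85.0",           # placeholder header value
    request_headers_bytes=256,          # made-up size
)
response = HttpResponseProperties(
    response_code=200,                  # wrapped uint32; plain int accepted
    response_headers={"content-type": "application/json"},
)

print(request.to_json())
print(response.to_dict())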
- response_code_details: str = betterproto.string_field(6) - - -from ....config.core import v3 as ___config_core_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/data/cluster/__init__.py b/src/envoy_data_plane/envoy/data/cluster/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/data/cluster/v2alpha/__init__.py b/src/envoy_data_plane/envoy/data/cluster/v2alpha/__init__.py deleted file mode 100644 index 45469c2..0000000 --- a/src/envoy_data_plane/envoy/data/cluster/v2alpha/__init__.py +++ /dev/null @@ -1,120 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/data/cluster/v2alpha/outlier_detection_event.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import datetime -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class OutlierEjectionType(betterproto.Enum): - """Type of ejection that took place""" - - # In case upstream host returns certain number of consecutive 5xx. If :ref:`o - # utlier_detection.split_external_local_origin_errors` is *false*, all type - # of errors are treated as HTTP 5xx errors. See :ref:`Cluster outlier - # detection ` documentation for details. - CONSECUTIVE_5XX = 0 - # In case upstream host returns certain number of consecutive gateway errors - CONSECUTIVE_GATEWAY_FAILURE = 1 - # Runs over aggregated success rate statistics from every host in cluster and - # selects hosts for which ratio of successful replies deviates from other - # hosts in the cluster. If :ref:`outlier_detection.split_external_local_origi - # n_errors` is *false*, all errors (externally and locally generated) are - # used to calculate success rate statistics. See :ref:`Cluster outlier - # detection ` documentation for details. - SUCCESS_RATE = 2 - # Consecutive local origin failures: Connection failures, resets, timeouts, - # etc This type of ejection happens only when :ref:`outlier_detection.split_e - # xternal_local_origin_errors` is set to *true*. See :ref:`Cluster outlier - # detection ` documentation for - CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3 - # Runs over aggregated success rate statistics for local origin failures for - # all hosts in the cluster and selects hosts for which success rate deviates - # from other hosts in the cluster. This type of ejection happens only when :r - # ef:`outlier_detection.split_external_local_origin_errors` is set to - # *true*. See :ref:`Cluster outlier detection - # ` documentation for - SUCCESS_RATE_LOCAL_ORIGIN = 4 - # Runs over aggregated success rate statistics from every host in cluster and - # selects hosts for which ratio of failed replies is above configured value. - FAILURE_PERCENTAGE = 5 - # Runs over aggregated success rate statistics for local origin failures from - # every host in cluster and selects hosts for which ratio of failed replies - # is above configured value. - FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6 - - -class Action(betterproto.Enum): - """Represents possible action applied to upstream host""" - - # In case host was excluded from service - EJECT = 0 - # In case host was brought back into service - UNEJECT = 1 - - -@dataclass(eq=False, repr=False) -class OutlierDetectionEvent(betterproto.Message): - """[#next-free-field: 12]""" - - # In case of eject represents type of ejection that took place. - type: "OutlierEjectionType" = betterproto.enum_field(1) - # Timestamp for event. 
- timestamp: datetime = betterproto.message_field(2) - # The time in seconds since the last action (either an ejection or - # unejection) took place. - secs_since_last_action: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT64 - ) - # The :ref:`cluster ` that owns the ejected host. - cluster_name: str = betterproto.string_field(4) - # The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. - upstream_url: str = betterproto.string_field(5) - # The action that took place. - action: "Action" = betterproto.enum_field(6) - # If ``action`` is ``eject``, specifies the number of times the host has been - # ejected (local to that Envoy and gets reset if the host gets removed from - # the upstream cluster for any reason and then re-added). - num_ejections: int = betterproto.uint32_field(7) - # If ``action`` is ``eject``, specifies if the ejection was enforced. - # ``true`` means the host was ejected. ``false`` means the event was logged - # but the host was not actually ejected. - enforced: bool = betterproto.bool_field(8) - eject_success_rate_event: "OutlierEjectSuccessRate" = betterproto.message_field( - 9, group="event" - ) - eject_consecutive_event: "OutlierEjectConsecutive" = betterproto.message_field( - 10, group="event" - ) - eject_failure_percentage_event: "OutlierEjectFailurePercentage" = ( - betterproto.message_field(11, group="event") - ) - - -@dataclass(eq=False, repr=False) -class OutlierEjectSuccessRate(betterproto.Message): - # Host’s success rate at the time of the ejection event on a 0-100 range. - host_success_rate: int = betterproto.uint32_field(1) - # Average success rate of the hosts in the cluster at the time of the - # ejection event on a 0-100 range. - cluster_average_success_rate: int = betterproto.uint32_field(2) - # Success rate ejection threshold at the time of the ejection event. - cluster_success_rate_ejection_threshold: int = betterproto.uint32_field(3) - - -@dataclass(eq=False, repr=False) -class OutlierEjectConsecutive(betterproto.Message): - pass - - -@dataclass(eq=False, repr=False) -class OutlierEjectFailurePercentage(betterproto.Message): - # Host's success rate at the time of the ejection event on a 0-100 range. - host_success_rate: int = betterproto.uint32_field(1) diff --git a/src/envoy_data_plane/envoy/data/cluster/v3/__init__.py b/src/envoy_data_plane/envoy/data/cluster/v3/__init__.py deleted file mode 100644 index 42895b3..0000000 --- a/src/envoy_data_plane/envoy/data/cluster/v3/__init__.py +++ /dev/null @@ -1,124 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/data/cluster/v3/outlier_detection_event.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import datetime -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class OutlierEjectionType(betterproto.Enum): - """Type of ejection that took place""" - - # In case upstream host returns certain number of consecutive 5xx. If :ref:`o - # utlier_detection.split_external_local_origin_errors` is - # *false*, all type of errors are treated as HTTP 5xx errors. See - # :ref:`Cluster outlier detection ` - # documentation for details. - CONSECUTIVE_5XX = 0 - # In case upstream host returns certain number of consecutive gateway errors - CONSECUTIVE_GATEWAY_FAILURE = 1 - # Runs over aggregated success rate statistics from every host in cluster and - # selects hosts for which ratio of successful replies deviates from other - # hosts in the cluster. 
If :ref:`outlier_detection.split_external_local_origi - # n_errors` is *false*, all errors (externally and locally - # generated) are used to calculate success rate statistics. See :ref:`Cluster - # outlier detection ` documentation for - # details. - SUCCESS_RATE = 2 - # Consecutive local origin failures: Connection failures, resets, timeouts, - # etc This type of ejection happens only when :ref:`outlier_detection.split_e - # xternal_local_origin_errors` is set to *true*. See - # :ref:`Cluster outlier detection ` - # documentation for - CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3 - # Runs over aggregated success rate statistics for local origin failures for - # all hosts in the cluster and selects hosts for which success rate deviates - # from other hosts in the cluster. This type of ejection happens only when :r - # ef:`outlier_detection.split_external_local_origin_errors` is - # set to *true*. See :ref:`Cluster outlier detection - # ` documentation for - SUCCESS_RATE_LOCAL_ORIGIN = 4 - # Runs over aggregated success rate statistics from every host in cluster and - # selects hosts for which ratio of failed replies is above configured value. - FAILURE_PERCENTAGE = 5 - # Runs over aggregated success rate statistics for local origin failures from - # every host in cluster and selects hosts for which ratio of failed replies - # is above configured value. - FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6 - - -class Action(betterproto.Enum): - """Represents possible action applied to upstream host""" - - # In case host was excluded from service - EJECT = 0 - # In case host was brought back into service - UNEJECT = 1 - - -@dataclass(eq=False, repr=False) -class OutlierDetectionEvent(betterproto.Message): - """[#next-free-field: 12]""" - - # In case of eject represents type of ejection that took place. - type: "OutlierEjectionType" = betterproto.enum_field(1) - # Timestamp for event. - timestamp: datetime = betterproto.message_field(2) - # The time in seconds since the last action (either an ejection or - # unejection) took place. - secs_since_last_action: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT64 - ) - # The :ref:`cluster ` that owns - # the ejected host. - cluster_name: str = betterproto.string_field(4) - # The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. - upstream_url: str = betterproto.string_field(5) - # The action that took place. - action: "Action" = betterproto.enum_field(6) - # If ``action`` is ``eject``, specifies the number of times the host has been - # ejected (local to that Envoy and gets reset if the host gets removed from - # the upstream cluster for any reason and then re-added). - num_ejections: int = betterproto.uint32_field(7) - # If ``action`` is ``eject``, specifies if the ejection was enforced. - # ``true`` means the host was ejected. ``false`` means the event was logged - # but the host was not actually ejected. - enforced: bool = betterproto.bool_field(8) - eject_success_rate_event: "OutlierEjectSuccessRate" = betterproto.message_field( - 9, group="event" - ) - eject_consecutive_event: "OutlierEjectConsecutive" = betterproto.message_field( - 10, group="event" - ) - eject_failure_percentage_event: "OutlierEjectFailurePercentage" = ( - betterproto.message_field(11, group="event") - ) - - -@dataclass(eq=False, repr=False) -class OutlierEjectSuccessRate(betterproto.Message): - # Host’s success rate at the time of the ejection event on a 0-100 range. 
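# --- [Editor's note: illustrative sketch, not part of this patch or of the deleted file] ---
# A minimal example of how the v3 outlier-detection bindings above could be used from the
# envoy_data_plane package, assuming the import path mirrors the file path shown in this
# hunk (envoy_data_plane.envoy.data.cluster.v3). All field values are invented.
from datetime import datetime, timezone

from envoy_data_plane.envoy.data.cluster.v3 import (
    Action,
    OutlierDetectionEvent,
    OutlierEjectConsecutive,
    OutlierEjectionType,
)

event = OutlierDetectionEvent(
    type=OutlierEjectionType.CONSECUTIVE_5XX,
    timestamp=datetime.now(timezone.utc),
    cluster_name="backend",
    upstream_url="tcp://10.0.0.1:8080",
    action=Action.EJECT,
    num_ejections=1,
    enforced=True,
    eject_consecutive_event=OutlierEjectConsecutive(),  # one member of the "event" oneof
)

raw = bytes(event)                            # serialize to the protobuf wire format
decoded = OutlierDetectionEvent().parse(raw)  # parse it back
assert decoded.cluster_name == "backend"
# --- [end of editor's sketch] ---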
- host_success_rate: int = betterproto.uint32_field(1) - # Average success rate of the hosts in the cluster at the time of the - # ejection event on a 0-100 range. - cluster_average_success_rate: int = betterproto.uint32_field(2) - # Success rate ejection threshold at the time of the ejection event. - cluster_success_rate_ejection_threshold: int = betterproto.uint32_field(3) - - -@dataclass(eq=False, repr=False) -class OutlierEjectConsecutive(betterproto.Message): - pass - - -@dataclass(eq=False, repr=False) -class OutlierEjectFailurePercentage(betterproto.Message): - # Host's success rate at the time of the ejection event on a 0-100 range. - host_success_rate: int = betterproto.uint32_field(1) diff --git a/src/envoy_data_plane/envoy/data/core/__init__.py b/src/envoy_data_plane/envoy/data/core/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/data/core/v2alpha/__init__.py b/src/envoy_data_plane/envoy/data/core/v2alpha/__init__.py deleted file mode 100644 index 2bd4a83..0000000 --- a/src/envoy_data_plane/envoy/data/core/v2alpha/__init__.py +++ /dev/null @@ -1,88 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/data/core/v2alpha/health_check_event.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import datetime - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class HealthCheckFailureType(betterproto.Enum): - ACTIVE = 0 - PASSIVE = 1 - NETWORK = 2 - - -class HealthCheckerType(betterproto.Enum): - HTTP = 0 - TCP = 1 - GRPC = 2 - REDIS = 3 - - -@dataclass(eq=False, repr=False) -class HealthCheckEvent(betterproto.Message): - """[#next-free-field: 10]""" - - health_checker_type: "HealthCheckerType" = betterproto.enum_field(1) - host: "___api_v2_core__.Address" = betterproto.message_field(2) - cluster_name: str = betterproto.string_field(3) - # Host ejection. - eject_unhealthy_event: "HealthCheckEjectUnhealthy" = betterproto.message_field( - 4, group="event" - ) - # Host addition. - add_healthy_event: "HealthCheckAddHealthy" = betterproto.message_field( - 5, group="event" - ) - # Host failure. - health_check_failure_event: "HealthCheckFailure" = betterproto.message_field( - 7, group="event" - ) - # Healthy host became degraded. - degraded_healthy_host: "DegradedHealthyHost" = betterproto.message_field( - 8, group="event" - ) - # A degraded host returned to being healthy. - no_longer_degraded_host: "NoLongerDegradedHost" = betterproto.message_field( - 9, group="event" - ) - # Timestamp for event. - timestamp: datetime = betterproto.message_field(6) - - -@dataclass(eq=False, repr=False) -class HealthCheckEjectUnhealthy(betterproto.Message): - # The type of failure that caused this ejection. - failure_type: "HealthCheckFailureType" = betterproto.enum_field(1) - - -@dataclass(eq=False, repr=False) -class HealthCheckAddHealthy(betterproto.Message): - # Whether this addition is the result of the first ever health check on a - # host, in which case the configured :ref:`healthy threshold - # ` is bypassed and the - # host is immediately added. - first_check: bool = betterproto.bool_field(1) - - -@dataclass(eq=False, repr=False) -class HealthCheckFailure(betterproto.Message): - # The type of failure that caused this event. - failure_type: "HealthCheckFailureType" = betterproto.enum_field(1) - # Whether this event is the result of the first ever health check on a host. 
- first_check: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class DegradedHealthyHost(betterproto.Message): - pass - - -@dataclass(eq=False, repr=False) -class NoLongerDegradedHost(betterproto.Message): - pass - - -from ....api.v2 import core as ___api_v2_core__ diff --git a/src/envoy_data_plane/envoy/data/core/v3/__init__.py b/src/envoy_data_plane/envoy/data/core/v3/__init__.py deleted file mode 100644 index 1913865..0000000 --- a/src/envoy_data_plane/envoy/data/core/v3/__init__.py +++ /dev/null @@ -1,89 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/data/core/v3/health_check_event.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import datetime - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class HealthCheckFailureType(betterproto.Enum): - ACTIVE = 0 - PASSIVE = 1 - NETWORK = 2 - NETWORK_TIMEOUT = 3 - - -class HealthCheckerType(betterproto.Enum): - HTTP = 0 - TCP = 1 - GRPC = 2 - REDIS = 3 - - -@dataclass(eq=False, repr=False) -class HealthCheckEvent(betterproto.Message): - """[#next-free-field: 10]""" - - health_checker_type: "HealthCheckerType" = betterproto.enum_field(1) - host: "___config_core_v3__.Address" = betterproto.message_field(2) - cluster_name: str = betterproto.string_field(3) - # Host ejection. - eject_unhealthy_event: "HealthCheckEjectUnhealthy" = betterproto.message_field( - 4, group="event" - ) - # Host addition. - add_healthy_event: "HealthCheckAddHealthy" = betterproto.message_field( - 5, group="event" - ) - # Host failure. - health_check_failure_event: "HealthCheckFailure" = betterproto.message_field( - 7, group="event" - ) - # Healthy host became degraded. - degraded_healthy_host: "DegradedHealthyHost" = betterproto.message_field( - 8, group="event" - ) - # A degraded host returned to being healthy. - no_longer_degraded_host: "NoLongerDegradedHost" = betterproto.message_field( - 9, group="event" - ) - # Timestamp for event. - timestamp: datetime = betterproto.message_field(6) - - -@dataclass(eq=False, repr=False) -class HealthCheckEjectUnhealthy(betterproto.Message): - # The type of failure that caused this ejection. - failure_type: "HealthCheckFailureType" = betterproto.enum_field(1) - - -@dataclass(eq=False, repr=False) -class HealthCheckAddHealthy(betterproto.Message): - # Whether this addition is the result of the first ever health check on a - # host, in which case the configured :ref:`healthy threshold - # ` is - # bypassed and the host is immediately added. - first_check: bool = betterproto.bool_field(1) - - -@dataclass(eq=False, repr=False) -class HealthCheckFailure(betterproto.Message): - # The type of failure that caused this event. - failure_type: "HealthCheckFailureType" = betterproto.enum_field(1) - # Whether this event is the result of the first ever health check on a host. 
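# --- [Editor's note: illustrative sketch, not part of this patch or of the deleted file] ---
# One possible way to build a v3 health-check event with the classes defined above,
# assuming the import path mirrors the file path shown in this hunk
# (envoy_data_plane.envoy.data.core.v3). The host Address is omitted to keep the sketch
# self-contained; values are invented.
from datetime import datetime, timezone

from envoy_data_plane.envoy.data.core.v3 import (
    HealthCheckAddHealthy,
    HealthCheckerType,
    HealthCheckEvent,
)

event = HealthCheckEvent(
    health_checker_type=HealthCheckerType.HTTP,
    cluster_name="backend",
    timestamp=datetime.now(timezone.utc),
    # "add_healthy_event" is one member of the "event" oneof group
    add_healthy_event=HealthCheckAddHealthy(first_check=True),
)

print(event.to_json())  # betterproto messages can also be rendered as JSON
# --- [end of editor's sketch] ---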
- first_check: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class DegradedHealthyHost(betterproto.Message): - pass - - -@dataclass(eq=False, repr=False) -class NoLongerDegradedHost(betterproto.Message): - pass - - -from ....config.core import v3 as ___config_core_v3__ diff --git a/src/envoy_data_plane/envoy/data/dns/__init__.py b/src/envoy_data_plane/envoy/data/dns/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/data/dns/v2alpha/__init__.py b/src/envoy_data_plane/envoy/data/dns/v2alpha/__init__.py deleted file mode 100644 index 79a798c..0000000 --- a/src/envoy_data_plane/envoy/data/dns/v2alpha/__init__.py +++ /dev/null @@ -1,72 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/data/dns/v2alpha/dns_table.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class DnsTable(betterproto.Message): - """ - This message contains the configuration for the DNS Filter if populated - from the control plane - """ - - # Control how many times envoy makes an attempt to forward a query to an - # external server - external_retry_count: int = betterproto.uint32_field(1) - # Fully qualified domain names for which Envoy will respond to queries - virtual_domains: List["DnsTableDnsVirtualDomain"] = betterproto.message_field(2) - # This field serves to help Envoy determine whether it can authoritatively - # answer a query for a name matching a suffix in this list. If the query name - # does not match a suffix in this list, Envoy will forward the query to an - # upstream DNS server - known_suffixes: List["___type_matcher__.StringMatcher"] = betterproto.message_field( - 3 - ) - - -@dataclass(eq=False, repr=False) -class DnsTableAddressList(betterproto.Message): - """ - This message contains a list of IP addresses returned for a query for a - known name - """ - - # This field contains a well formed IP address that is returned in the answer - # for a name query. The address field can be an IPv4 or IPv6 address. Address - # family detection is done automatically when Envoy parses the string. Since - # this field is repeated, Envoy will return one randomly chosen entry from - # this list in the DNS response. 
The random index will vary per query so that - # we prevent clients pinning on a single address for a configured domain - address: List[str] = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class DnsTableDnsEndpoint(betterproto.Message): - """ - This message type is extensible and can contain a list of addresses or - dictate some other method for resolving the addresses for an endpoint - """ - - address_list: "DnsTableAddressList" = betterproto.message_field( - 1, group="endpoint_config" - ) - - -@dataclass(eq=False, repr=False) -class DnsTableDnsVirtualDomain(betterproto.Message): - # The domain name for which Envoy will respond to query requests - name: str = betterproto.string_field(1) - # The configuration containing the method to determine the address of this - # endpoint - endpoint: "DnsTableDnsEndpoint" = betterproto.message_field(2) - # Sets the TTL in dns answers from Envoy returned to the client - answer_ttl: timedelta = betterproto.message_field(3) - - -from ....type import matcher as ___type_matcher__ diff --git a/src/envoy_data_plane/envoy/data/dns/v3/__init__.py b/src/envoy_data_plane/envoy/data/dns/v3/__init__.py deleted file mode 100644 index d1f9bc5..0000000 --- a/src/envoy_data_plane/envoy/data/dns/v3/__init__.py +++ /dev/null @@ -1,142 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/data/dns/v3/dns_table.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class DnsTable(betterproto.Message): - """ - This message contains the configuration for the DNS Filter if populated - from the control plane - """ - - # Control how many times Envoy makes an attempt to forward a query to an - # external DNS server - external_retry_count: int = betterproto.uint32_field(1) - # Fully qualified domain names for which Envoy will respond to DNS queries. - # By leaving this list empty, Envoy will forward all queries to external - # resolvers - virtual_domains: List["DnsTableDnsVirtualDomain"] = betterproto.message_field(2) - # This field is deprecated and no longer used in Envoy. The filter's behavior - # has changed internally to use a different data structure allowing the - # filter to determine whether a query is for known domain without the use of - # this field. This field serves to help Envoy determine whether it can - # authoritatively answer a query for a name matching a suffix in this list. - # If the query name does not match a suffix in this list, Envoy will forward - # the query to an upstream DNS server - known_suffixes: List[ - "___type_matcher_v3__.StringMatcher" - ] = betterproto.message_field(3) - - def __post_init__(self) -> None: - super().__post_init__() - if self.known_suffixes: - warnings.warn("DnsTable.known_suffixes is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class DnsTableAddressList(betterproto.Message): - """ - This message contains a list of IP addresses returned for a query for a - known name - """ - - # This field contains a well formed IP address that is returned in the answer - # for a name query. The address field can be an IPv4 or IPv6 address. Address - # family detection is done automatically when Envoy parses the string. 
Since - # this field is repeated, Envoy will return as many entries from this list in - # the DNS response while keeping the response under 512 bytes - address: List[str] = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class DnsTableDnsServiceProtocol(betterproto.Message): - """Specify the service protocol using a numeric or string value""" - - # Specify the protocol number for the service. Envoy will try to resolve the - # number to the protocol name. For example, 6 will resolve to "tcp". Refer - # to: https://www.iana.org/assignments/protocol-numbers/protocol- - # numbers.xhtml for protocol names and numbers - number: int = betterproto.uint32_field(1, group="protocol_config") - # Specify the protocol name for the service. - name: str = betterproto.string_field(2, group="protocol_config") - - -@dataclass(eq=False, repr=False) -class DnsTableDnsServiceTarget(betterproto.Message): - """Specify the target for a given DNS service [#next-free-field: 6]""" - - # Use a resolvable hostname as the endpoint for a service. - host_name: str = betterproto.string_field(1, group="endpoint_type") - # Use a cluster name as the endpoint for a service. - cluster_name: str = betterproto.string_field(2, group="endpoint_type") - # The priority of the service record target - priority: int = betterproto.uint32_field(3) - # The weight of the service record target - weight: int = betterproto.uint32_field(4) - # The port to which the service is bound. This value is optional if the - # target is a cluster. Setting port to zero in this case makes the filter use - # the port value from the cluster host - port: int = betterproto.uint32_field(5) - - -@dataclass(eq=False, repr=False) -class DnsTableDnsService(betterproto.Message): - """ - This message defines a service selection record returned for a service - query in a domain - """ - - # The name of the service without the protocol or domain name - service_name: str = betterproto.string_field(1) - # The service protocol. This can be specified as a string or the numeric - # value of the protocol - protocol: "DnsTableDnsServiceProtocol" = betterproto.message_field(2) - # The service entry time to live. 
This is independent from the DNS Answer - # record TTL - ttl: timedelta = betterproto.message_field(3) - # The list of targets hosting the service - targets: List["DnsTableDnsServiceTarget"] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class DnsTableDnsServiceList(betterproto.Message): - """Define a list of service records for a given service""" - - services: List["DnsTableDnsService"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class DnsTableDnsEndpoint(betterproto.Message): - # Define a list of addresses to return for the specified endpoint - address_list: "DnsTableAddressList" = betterproto.message_field( - 1, group="endpoint_config" - ) - # Define a cluster whose addresses are returned for the specified endpoint - cluster_name: str = betterproto.string_field(2, group="endpoint_config") - # Define a DNS Service List for the specified endpoint - service_list: "DnsTableDnsServiceList" = betterproto.message_field( - 3, group="endpoint_config" - ) - - -@dataclass(eq=False, repr=False) -class DnsTableDnsVirtualDomain(betterproto.Message): - # A domain name for which Envoy will respond to query requests - name: str = betterproto.string_field(1) - # The configuration containing the method to determine the address of this - # endpoint - endpoint: "DnsTableDnsEndpoint" = betterproto.message_field(2) - # Sets the TTL in DNS answers from Envoy returned to the client. The default - # TTL is 300s - answer_ttl: timedelta = betterproto.message_field(3) - - -from ....type.matcher import v3 as ___type_matcher_v3__ diff --git a/src/envoy_data_plane/envoy/data/tap/__init__.py b/src/envoy_data_plane/envoy/data/tap/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/data/tap/v2alpha/__init__.py b/src/envoy_data_plane/envoy/data/tap/v2alpha/__init__.py deleted file mode 100644 index dd6c93a..0000000 --- a/src/envoy_data_plane/envoy/data/tap/v2alpha/__init__.py +++ /dev/null @@ -1,205 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/data/tap/v2alpha/common.proto, envoy/data/tap/v2alpha/http.proto, envoy/data/tap/v2alpha/transport.proto, envoy/data/tap/v2alpha/wrapper.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import datetime -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Body(betterproto.Message): - """ - Wrapper for tapped body data. This includes HTTP request/response body, - transport socket received and transmitted data, etc. - """ - - # Body data as bytes. By default, tap body data will be present in this - # field, as the proto `bytes` type can contain any valid byte. - as_bytes: bytes = betterproto.bytes_field(1, group="body_type") - # Body data as string. This field is only used when the - # :ref:`JSON_BODY_AS_STRING ` sink format type is selected. See the - # documentation for that option for why this is useful. - as_string: str = betterproto.string_field(2, group="body_type") - # Specifies whether body data has been truncated to fit within the specified - # :ref:`max_buffered_rx_bytes - # ` - # and :ref:`max_buffered_tx_bytes - # ` - # settings. - truncated: bool = betterproto.bool_field(3) - - -@dataclass(eq=False, repr=False) -class HttpBufferedTrace(betterproto.Message): - """A fully buffered HTTP trace message.""" - - # Request message. 
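# --- [Editor's note: illustrative sketch, not part of this patch or of the deleted file] ---
# One way the v3 DnsTable defined above could be populated for the DNS filter, assuming
# the import path mirrors the file path shown in that hunk
# (envoy_data_plane.envoy.data.dns.v3). The deprecated known_suffixes field is left unset
# so no DeprecationWarning is raised; values are invented.
from datetime import timedelta

from envoy_data_plane.envoy.data.dns.v3 import (
    DnsTable,
    DnsTableAddressList,
    DnsTableDnsEndpoint,
    DnsTableDnsVirtualDomain,
)

table = DnsTable(
    external_retry_count=3,
    virtual_domains=[
        DnsTableDnsVirtualDomain(
            name="www.example.com",
            endpoint=DnsTableDnsEndpoint(
                # "address_list" is one member of the "endpoint_config" oneof
                address_list=DnsTableAddressList(address=["10.0.0.1", "10.0.0.2"]),
            ),
            answer_ttl=timedelta(seconds=300),  # maps to google.protobuf.Duration
        )
    ],
)

blob = bytes(table)  # wire-format bytes for the filter configuration payload
# --- [end of editor's sketch] ---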
- request: "HttpBufferedTraceMessage" = betterproto.message_field(1) - # Response message. - response: "HttpBufferedTraceMessage" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class HttpBufferedTraceMessage(betterproto.Message): - """HTTP message wrapper.""" - - # Message headers. - headers: List["___api_v2_core__.HeaderValue"] = betterproto.message_field(1) - # Message body. - body: "Body" = betterproto.message_field(2) - # Message trailers. - trailers: List["___api_v2_core__.HeaderValue"] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class HttpStreamedTraceSegment(betterproto.Message): - """ - A streamed HTTP trace segment. Multiple segments make up a full trace. - [#next-free-field: 8] - """ - - # Trace ID unique to the originating Envoy only. Trace IDs can repeat and - # should not be used for long term stable uniqueness. - trace_id: int = betterproto.uint64_field(1) - # Request headers. - request_headers: "___api_v2_core__.HeaderMap" = betterproto.message_field( - 2, group="message_piece" - ) - # Request body chunk. - request_body_chunk: "Body" = betterproto.message_field(3, group="message_piece") - # Request trailers. - request_trailers: "___api_v2_core__.HeaderMap" = betterproto.message_field( - 4, group="message_piece" - ) - # Response headers. - response_headers: "___api_v2_core__.HeaderMap" = betterproto.message_field( - 5, group="message_piece" - ) - # Response body chunk. - response_body_chunk: "Body" = betterproto.message_field(6, group="message_piece") - # Response trailers. - response_trailers: "___api_v2_core__.HeaderMap" = betterproto.message_field( - 7, group="message_piece" - ) - - -@dataclass(eq=False, repr=False) -class Connection(betterproto.Message): - """Connection properties.""" - - # Local address. - local_address: "___api_v2_core__.Address" = betterproto.message_field(2) - # Remote address. - remote_address: "___api_v2_core__.Address" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class SocketEvent(betterproto.Message): - """Event in a socket trace.""" - - # Timestamp for event. - timestamp: datetime = betterproto.message_field(1) - read: "SocketEventRead" = betterproto.message_field(2, group="event_selector") - write: "SocketEventWrite" = betterproto.message_field(3, group="event_selector") - closed: "SocketEventClosed" = betterproto.message_field(4, group="event_selector") - - -@dataclass(eq=False, repr=False) -class SocketEventRead(betterproto.Message): - """Data read by Envoy from the transport socket.""" - - # Binary data read. - data: "Body" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class SocketEventWrite(betterproto.Message): - """Data written by Envoy to the transport socket.""" - - # Binary data written. - data: "Body" = betterproto.message_field(1) - # Stream was half closed after this write. - end_stream: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class SocketEventClosed(betterproto.Message): - """The connection was closed.""" - - pass - - -@dataclass(eq=False, repr=False) -class SocketBufferedTrace(betterproto.Message): - """ - Sequence of read/write events that constitute a buffered trace on a socket. - [#next-free-field: 6] - """ - - # Trace ID unique to the originating Envoy only. Trace IDs can repeat and - # should not be used for long term stable uniqueness. Matches connection IDs - # used in Envoy logs. - trace_id: int = betterproto.uint64_field(1) - # Connection properties. 
- connection: "Connection" = betterproto.message_field(2) - # Sequence of observed events. - events: List["SocketEvent"] = betterproto.message_field(3) - # Set to true if read events were truncated due to the - # :ref:`max_buffered_rx_bytes - # ` - # setting. - read_truncated: bool = betterproto.bool_field(4) - # Set to true if write events were truncated due to the - # :ref:`max_buffered_tx_bytes - # ` - # setting. - write_truncated: bool = betterproto.bool_field(5) - - -@dataclass(eq=False, repr=False) -class SocketStreamedTraceSegment(betterproto.Message): - """ - A streamed socket trace segment. Multiple segments make up a full trace. - """ - - # Trace ID unique to the originating Envoy only. Trace IDs can repeat and - # should not be used for long term stable uniqueness. Matches connection IDs - # used in Envoy logs. - trace_id: int = betterproto.uint64_field(1) - # Connection properties. - connection: "Connection" = betterproto.message_field(2, group="message_piece") - # Socket event. - event: "SocketEvent" = betterproto.message_field(3, group="message_piece") - - -@dataclass(eq=False, repr=False) -class TraceWrapper(betterproto.Message): - """ - Wrapper for all fully buffered and streamed tap traces that Envoy emits. - This is required for sending traces over gRPC APIs or more easily - persisting binary messages to files. - """ - - # An HTTP buffered tap trace. - http_buffered_trace: "HttpBufferedTrace" = betterproto.message_field( - 1, group="trace" - ) - # An HTTP streamed tap trace segment. - http_streamed_trace_segment: "HttpStreamedTraceSegment" = betterproto.message_field( - 2, group="trace" - ) - # A socket buffered tap trace. - socket_buffered_trace: "SocketBufferedTrace" = betterproto.message_field( - 3, group="trace" - ) - # A socket streamed tap trace segment. - socket_streamed_trace_segment: "SocketStreamedTraceSegment" = ( - betterproto.message_field(4, group="trace") - ) - - -from ....api.v2 import core as ___api_v2_core__ diff --git a/src/envoy_data_plane/envoy/data/tap/v3/__init__.py b/src/envoy_data_plane/envoy/data/tap/v3/__init__.py deleted file mode 100644 index abcf336..0000000 --- a/src/envoy_data_plane/envoy/data/tap/v3/__init__.py +++ /dev/null @@ -1,205 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/data/tap/v3/common.proto, envoy/data/tap/v3/http.proto, envoy/data/tap/v3/transport.proto, envoy/data/tap/v3/wrapper.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import datetime -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Body(betterproto.Message): - """ - Wrapper for tapped body data. This includes HTTP request/response body, - transport socket received and transmitted data, etc. - """ - - # Body data as bytes. By default, tap body data will be present in this - # field, as the proto `bytes` type can contain any valid byte. - as_bytes: bytes = betterproto.bytes_field(1, group="body_type") - # Body data as string. This field is only used when the - # :ref:`JSON_BODY_AS_STRING ` sink format type is selected. See the - # documentation for that option for why this is useful. - as_string: str = betterproto.string_field(2, group="body_type") - # Specifies whether body data has been truncated to fit within the specified - # :ref:`max_buffered_rx_bytes - # ` and - # :ref:`max_buffered_tx_bytes - # ` - # settings. 
- truncated: bool = betterproto.bool_field(3) - - -@dataclass(eq=False, repr=False) -class HttpBufferedTrace(betterproto.Message): - """A fully buffered HTTP trace message.""" - - # Request message. - request: "HttpBufferedTraceMessage" = betterproto.message_field(1) - # Response message. - response: "HttpBufferedTraceMessage" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class HttpBufferedTraceMessage(betterproto.Message): - """HTTP message wrapper.""" - - # Message headers. - headers: List["___config_core_v3__.HeaderValue"] = betterproto.message_field(1) - # Message body. - body: "Body" = betterproto.message_field(2) - # Message trailers. - trailers: List["___config_core_v3__.HeaderValue"] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class HttpStreamedTraceSegment(betterproto.Message): - """ - A streamed HTTP trace segment. Multiple segments make up a full trace. - [#next-free-field: 8] - """ - - # Trace ID unique to the originating Envoy only. Trace IDs can repeat and - # should not be used for long term stable uniqueness. - trace_id: int = betterproto.uint64_field(1) - # Request headers. - request_headers: "___config_core_v3__.HeaderMap" = betterproto.message_field( - 2, group="message_piece" - ) - # Request body chunk. - request_body_chunk: "Body" = betterproto.message_field(3, group="message_piece") - # Request trailers. - request_trailers: "___config_core_v3__.HeaderMap" = betterproto.message_field( - 4, group="message_piece" - ) - # Response headers. - response_headers: "___config_core_v3__.HeaderMap" = betterproto.message_field( - 5, group="message_piece" - ) - # Response body chunk. - response_body_chunk: "Body" = betterproto.message_field(6, group="message_piece") - # Response trailers. - response_trailers: "___config_core_v3__.HeaderMap" = betterproto.message_field( - 7, group="message_piece" - ) - - -@dataclass(eq=False, repr=False) -class Connection(betterproto.Message): - """Connection properties.""" - - # Local address. - local_address: "___config_core_v3__.Address" = betterproto.message_field(2) - # Remote address. - remote_address: "___config_core_v3__.Address" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class SocketEvent(betterproto.Message): - """Event in a socket trace.""" - - # Timestamp for event. - timestamp: datetime = betterproto.message_field(1) - read: "SocketEventRead" = betterproto.message_field(2, group="event_selector") - write: "SocketEventWrite" = betterproto.message_field(3, group="event_selector") - closed: "SocketEventClosed" = betterproto.message_field(4, group="event_selector") - - -@dataclass(eq=False, repr=False) -class SocketEventRead(betterproto.Message): - """Data read by Envoy from the transport socket.""" - - # Binary data read. - data: "Body" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class SocketEventWrite(betterproto.Message): - """Data written by Envoy to the transport socket.""" - - # Binary data written. - data: "Body" = betterproto.message_field(1) - # Stream was half closed after this write. - end_stream: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class SocketEventClosed(betterproto.Message): - """The connection was closed.""" - - pass - - -@dataclass(eq=False, repr=False) -class SocketBufferedTrace(betterproto.Message): - """ - Sequence of read/write events that constitute a buffered trace on a socket. - [#next-free-field: 6] - """ - - # Trace ID unique to the originating Envoy only. 
Trace IDs can repeat and - # should not be used for long term stable uniqueness. Matches connection IDs - # used in Envoy logs. - trace_id: int = betterproto.uint64_field(1) - # Connection properties. - connection: "Connection" = betterproto.message_field(2) - # Sequence of observed events. - events: List["SocketEvent"] = betterproto.message_field(3) - # Set to true if read events were truncated due to the - # :ref:`max_buffered_rx_bytes - # ` - # setting. - read_truncated: bool = betterproto.bool_field(4) - # Set to true if write events were truncated due to the - # :ref:`max_buffered_tx_bytes - # ` - # setting. - write_truncated: bool = betterproto.bool_field(5) - - -@dataclass(eq=False, repr=False) -class SocketStreamedTraceSegment(betterproto.Message): - """ - A streamed socket trace segment. Multiple segments make up a full trace. - """ - - # Trace ID unique to the originating Envoy only. Trace IDs can repeat and - # should not be used for long term stable uniqueness. Matches connection IDs - # used in Envoy logs. - trace_id: int = betterproto.uint64_field(1) - # Connection properties. - connection: "Connection" = betterproto.message_field(2, group="message_piece") - # Socket event. - event: "SocketEvent" = betterproto.message_field(3, group="message_piece") - - -@dataclass(eq=False, repr=False) -class TraceWrapper(betterproto.Message): - """ - Wrapper for all fully buffered and streamed tap traces that Envoy emits. - This is required for sending traces over gRPC APIs or more easily - persisting binary messages to files. - """ - - # An HTTP buffered tap trace. - http_buffered_trace: "HttpBufferedTrace" = betterproto.message_field( - 1, group="trace" - ) - # An HTTP streamed tap trace segment. - http_streamed_trace_segment: "HttpStreamedTraceSegment" = betterproto.message_field( - 2, group="trace" - ) - # A socket buffered tap trace. - socket_buffered_trace: "SocketBufferedTrace" = betterproto.message_field( - 3, group="trace" - ) - # A socket streamed tap trace segment. - socket_streamed_trace_segment: "SocketStreamedTraceSegment" = ( - betterproto.message_field(4, group="trace") - ) - - -from ....config.core import v3 as ___config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/__init__.py b/src/envoy_data_plane/envoy/extensions/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/access_loggers/__init__.py b/src/envoy_data_plane/envoy/extensions/access_loggers/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/access_loggers/file/__init__.py b/src/envoy_data_plane/envoy/extensions/access_loggers/file/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/access_loggers/file/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/access_loggers/file/v3/__init__.py deleted file mode 100644 index a58d634..0000000 --- a/src/envoy_data_plane/envoy/extensions/access_loggers/file/v3/__init__.py +++ /dev/null @@ -1,64 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/access_loggers/file/v3/file.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FileAccessLog(betterproto.Message): - """ - Custom configuration for an :ref:`AccessLog - ` that writes log entries - directly to a file. 
Configures the built-in *envoy.access_loggers.file* - AccessLog. [#next-free-field: 6] - """ - - # A path to a local file to which to write the access log entries. - path: str = betterproto.string_field(1) - # Access log :ref:`format string`. Envoy - # supports :ref:`custom access log formats ` as - # well as a :ref:`default format `. This - # field is deprecated. Please use :ref:`log_format `. - format: str = betterproto.string_field(2, group="access_log_format") - # Access log :ref:`format dictionary`. - # All values are rendered as strings. This field is deprecated. Please use - # :ref:`log_format `. - json_format: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field( - 3, group="access_log_format" - ) - # Access log :ref:`format dictionary`. - # Values are rendered as strings, numbers, or boolean values as appropriate. - # Nested JSON objects may be produced by some command operators - # (e.g.FILTER_STATE or DYNAMIC_METADATA). See the documentation for a - # specific command operator for details. This field is deprecated. Please use - # :ref:`log_format `. - typed_json_format: "betterproto_lib_google_protobuf.Struct" = ( - betterproto.message_field(4, group="access_log_format") - ) - # Configuration to form access log data and format. If not specified, use - # :ref:`default format `. - log_format: "____config_core_v3__.SubstitutionFormatString" = ( - betterproto.message_field(5, group="access_log_format") - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.format: - warnings.warn("FileAccessLog.format is deprecated", DeprecationWarning) - if self.json_format: - warnings.warn("FileAccessLog.json_format is deprecated", DeprecationWarning) - if self.typed_json_format: - warnings.warn( - "FileAccessLog.typed_json_format is deprecated", DeprecationWarning - ) - - -from .....config.core import v3 as ____config_core_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/extensions/access_loggers/filters/__init__.py b/src/envoy_data_plane/envoy/extensions/access_loggers/filters/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/access_loggers/filters/cel/__init__.py b/src/envoy_data_plane/envoy/extensions/access_loggers/filters/cel/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/access_loggers/filters/cel/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/access_loggers/filters/cel/v3/__init__.py deleted file mode 100644 index 79a1b2d..0000000 --- a/src/envoy_data_plane/envoy/extensions/access_loggers/filters/cel/v3/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/access_loggers/filters/cel/v3/cel.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ExpressionFilter(betterproto.Message): - """ - ExpressionFilter is an access logging filter that evaluates configured - symbolic Common Expression Language expressions to inform the decision to - generate an access log. - """ - - # Expression that, when evaluated, will be used to filter access logs. - # Expressions are based on the set of Envoy :ref:`attributes - # `. The provided expression must evaluate to true - # for logging (expression errors are considered false). 
Examples: - - # `response.code >= 400` - `(connection.mtls && request.headers['x-log-mtls'] - # == 'true') || request.url_path.contains('v1beta3')` - expression: str = betterproto.string_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/access_loggers/grpc/__init__.py b/src/envoy_data_plane/envoy/extensions/access_loggers/grpc/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/access_loggers/grpc/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/access_loggers/grpc/v3/__init__.py deleted file mode 100644 index e79c255..0000000 --- a/src/envoy_data_plane/envoy/extensions/access_loggers/grpc/v3/__init__.py +++ /dev/null @@ -1,95 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/access_loggers/grpc/v3/als.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class HttpGrpcAccessLogConfig(betterproto.Message): - """ - Configuration for the built-in *envoy.access_loggers.http_grpc* - :ref:`AccessLog `. This - configuration will populate :ref:`StreamAccessLogsMessage.http_logs `. - [#extension: envoy.access_loggers.http_grpc] - """ - - common_config: "CommonGrpcAccessLogConfig" = betterproto.message_field(1) - # Additional request headers to log in - # :ref:`HTTPRequestProperties.request_headers `. - additional_request_headers_to_log: List[str] = betterproto.string_field(2) - # Additional response headers to log in - # :ref:`HTTPResponseProperties.response_headers `. - additional_response_headers_to_log: List[str] = betterproto.string_field(3) - # Additional response trailers to log in - # :ref:`HTTPResponseProperties.response_trailers `. - additional_response_trailers_to_log: List[str] = betterproto.string_field(4) - - -@dataclass(eq=False, repr=False) -class TcpGrpcAccessLogConfig(betterproto.Message): - """ - Configuration for the built-in *envoy.access_loggers.tcp_grpc* type. This - configuration will populate *StreamAccessLogsMessage.tcp_logs*. - [#extension: envoy.access_loggers.tcp_grpc] - """ - - common_config: "CommonGrpcAccessLogConfig" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class CommonGrpcAccessLogConfig(betterproto.Message): - """Common configuration for gRPC access logs. [#next-free-field: 9]""" - - # The friendly name of the access log to be returned in - # :ref:`StreamAccessLogsMessage.Identifier `. This allows the access log - # server to differentiate between different access logs coming from the same - # Envoy. - log_name: str = betterproto.string_field(1) - # The gRPC service for the access log service. - grpc_service: "____config_core_v3__.GrpcService" = betterproto.message_field(2) - # API version for access logs service transport protocol. This describes the - # access logs service gRPC endpoint and version of messages used on the wire. - transport_api_version: "____config_core_v3__.ApiVersion" = betterproto.enum_field(6) - # Interval for flushing access logs to the gRPC stream. Logger will flush - # requests every time this interval is elapsed, or when batch size limit is - # hit, whichever comes first. Defaults to 1 second. - buffer_flush_interval: timedelta = betterproto.message_field(3) - # Soft size limit in bytes for access log entries buffer. 
Logger will buffer - # requests until this limit it hit, or every time flush interval is elapsed, - # whichever comes first. Setting it to zero effectively disables the - # batching. Defaults to 16384. - buffer_size_bytes: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # Additional filter state objects to log in :ref:`filter_state_objects `. - # Logger will call `FilterState::Object::serializeAsProto` to serialize the - # filter state object. - filter_state_objects_to_log: List[str] = betterproto.string_field(5) - # Sets the retry policy when the establishment of a gRPC stream fails. If the - # stream succeeds once in establishing If the stream succeeds at least once - # in establishing itself, no retry will be performed no matter what gRPC - # status is received. Note that only :ref:`num_retries - # ` will be used - # in this configuration. This feature is used only when you are using - # :ref:`Envoy gRPC client - # `. - grpc_stream_retry_policy: "____config_core_v3__.RetryPolicy" = ( - betterproto.message_field(7) - ) - # A list of custom tags with unique tag name to create tags for the logs. - custom_tags: List["____type_tracing_v3__.CustomTag"] = betterproto.message_field(8) - - -from .....config.core import v3 as ____config_core_v3__ -from .....type.tracing import v3 as ____type_tracing_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/access_loggers/open_telemetry/__init__.py b/src/envoy_data_plane/envoy/extensions/access_loggers/open_telemetry/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/access_loggers/open_telemetry/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/access_loggers/open_telemetry/v3/__init__.py deleted file mode 100644 index 2395f4f..0000000 --- a/src/envoy_data_plane/envoy/extensions/access_loggers/open_telemetry/v3/__init__.py +++ /dev/null @@ -1,48 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/access_loggers/open_telemetry/v3/logs_service.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class OpenTelemetryAccessLogConfig(betterproto.Message): - """ - Configuration for the built-in *envoy.access_loggers.open_telemetry* - :ref:`AccessLog `. This - configuration will populate `opentelemetry.proto.collector.v1.logs.ExportLo - gsServiceRequest.resource_logs `_. OpenTelemetry `Resource `_ - attributes are filled with Envoy node info. In addition, the request start - time is set in the dedicated field. [#extension: - envoy.access_loggers.open_telemetry] [#comment:TODO(itamarkam): allow - configuration for resource attributes.] - """ - - # [#comment:TODO(itamarkam): add 'filter_state_objects_to_log' to logs.] - common_config: "__grpc_v3__.CommonGrpcAccessLogConfig" = betterproto.message_field( - 1 - ) - # OpenTelemetry `LogResource `_ fields, following - # `Envoy access logging formatting `_. See 'body' in the - # LogResource proto for more details. Example: ``body { string_value: - # "%PROTOCOL%" }``. - body: "_____opentelemetry_proto_common_v1__.AnyValue" = betterproto.message_field(2) - # See 'attributes' in the LogResource proto for more details. Example: - # ``attributes { values { key: "user_agent" value { string_value: "%REQ(USER- - # AGENT)%" } } }``. 
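# --- [Editor's note: illustrative sketch, not part of this patch or of the deleted file] ---
# A minimal HTTP gRPC access-log configuration using the classes defined above, assuming
# the import path mirrors the file path shown in that hunk
# (envoy_data_plane.envoy.extensions.access_loggers.grpc.v3). The gRPC service target is
# omitted to keep the sketch self-contained; values are invented.
from datetime import timedelta

from envoy_data_plane.envoy.extensions.access_loggers.grpc.v3 import (
    CommonGrpcAccessLogConfig,
    HttpGrpcAccessLogConfig,
)

als_config = HttpGrpcAccessLogConfig(
    common_config=CommonGrpcAccessLogConfig(
        log_name="ingress_http",
        buffer_flush_interval=timedelta(seconds=1),  # matches the documented 1s default
        buffer_size_bytes=16384,                     # matches the documented default
    ),
    additional_request_headers_to_log=["x-request-id"],
)

print(als_config.to_json())
# --- [end of editor's sketch] ---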
- attributes: "_____opentelemetry_proto_common_v1__.KeyValueList" = ( - betterproto.message_field(3) - ) - - -from ......opentelemetry.proto.common import v1 as _____opentelemetry_proto_common_v1__ -from ...grpc import v3 as __grpc_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/access_loggers/stream/__init__.py b/src/envoy_data_plane/envoy/extensions/access_loggers/stream/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/access_loggers/stream/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/access_loggers/stream/v3/__init__.py deleted file mode 100644 index 499647c..0000000 --- a/src/envoy_data_plane/envoy/extensions/access_loggers/stream/v3/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/access_loggers/stream/v3/stream.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class StdoutAccessLog(betterproto.Message): - """ - Custom configuration for an :ref:`AccessLog - ` that writes log entries - directly to the operating system's standard output. - """ - - # Configuration to form access log data and format. If not specified, use - # :ref:`default format `. - log_format: "____config_core_v3__.SubstitutionFormatString" = ( - betterproto.message_field(1, group="access_log_format") - ) - - -@dataclass(eq=False, repr=False) -class StderrAccessLog(betterproto.Message): - """ - Custom configuration for an :ref:`AccessLog - ` that writes log entries - directly to the operating system's standard error. - """ - - # Configuration to form access log data and format. If not specified, use - # :ref:`default format `. - log_format: "____config_core_v3__.SubstitutionFormatString" = ( - betterproto.message_field(1, group="access_log_format") - ) - - -from .....config.core import v3 as ____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/access_loggers/wasm/__init__.py b/src/envoy_data_plane/envoy/extensions/access_loggers/wasm/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/access_loggers/wasm/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/access_loggers/wasm/v3/__init__.py deleted file mode 100644 index 09680a7..0000000 --- a/src/envoy_data_plane/envoy/extensions/access_loggers/wasm/v3/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/access_loggers/wasm/v3/wasm.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class WasmAccessLog(betterproto.Message): - """ - Custom configuration for an :ref:`AccessLog - ` that calls into a WASM - VM. Configures the built-in *envoy.access_loggers.wasm* AccessLog. 
- """ - - config: "___wasm_v3__.PluginConfig" = betterproto.message_field(1) - - -from ....wasm import v3 as ___wasm_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/cache/__init__.py b/src/envoy_data_plane/envoy/extensions/cache/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/cache/simple_http_cache/__init__.py b/src/envoy_data_plane/envoy/extensions/cache/simple_http_cache/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/cache/simple_http_cache/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/cache/simple_http_cache/v3/__init__.py deleted file mode 100644 index c40ff4b..0000000 --- a/src/envoy_data_plane/envoy/extensions/cache/simple_http_cache/v3/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/cache/simple_http_cache/v3/config.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class SimpleHttpCacheConfig(betterproto.Message): - """[#extension: envoy.cache.simple_http_cache]""" - - pass diff --git a/src/envoy_data_plane/envoy/extensions/clusters/__init__.py b/src/envoy_data_plane/envoy/extensions/clusters/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/clusters/aggregate/__init__.py b/src/envoy_data_plane/envoy/extensions/clusters/aggregate/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/clusters/aggregate/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/clusters/aggregate/v3/__init__.py deleted file mode 100644 index d593ef0..0000000 --- a/src/envoy_data_plane/envoy/extensions/clusters/aggregate/v3/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/clusters/aggregate/v3/cluster.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ClusterConfig(betterproto.Message): - """ - Configuration for the aggregate cluster. See the :ref:`architecture - overview ` for more information. - [#extension: envoy.clusters.aggregate] - """ - - # Load balancing clusters in aggregate cluster. Clusters are prioritized - # based on the order they appear in this list. - clusters: List[str] = betterproto.string_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/clusters/dynamic_forward_proxy/__init__.py b/src/envoy_data_plane/envoy/extensions/clusters/dynamic_forward_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/clusters/dynamic_forward_proxy/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/clusters/dynamic_forward_proxy/v3/__init__.py deleted file mode 100644 index 2e0ea0b..0000000 --- a/src/envoy_data_plane/envoy/extensions/clusters/dynamic_forward_proxy/v3/__init__.py +++ /dev/null @@ -1,49 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ClusterConfig(betterproto.Message): - """ - Configuration for the dynamic forward proxy cluster. See the - :ref:`architecture overview ` for - more information. [#extension: envoy.clusters.dynamic_forward_proxy] - """ - - # The DNS cache configuration that the cluster will attach to. Note this - # configuration must match that of associated :ref:`dynamic forward proxy - # HTTP filter configuration `. - dns_cache_config: "___common_dynamic_forward_proxy_v3__.DnsCacheConfig" = ( - betterproto.message_field(1) - ) - # If true allow the cluster configuration to disable the auto_sni and - # auto_san_validation options in the :ref:`cluster's - # upstream_http_protocol_options ` - allow_insecure_cluster_options: bool = betterproto.bool_field(2) - # [#not-implemented-hide:] If true allow HTTP/2 and HTTP/3 connections to be - # reused for requests to different origins than the connection was initially - # created for. This will only happen when the resolved address for the new - # connection matches the peer address of the connection and the TLS - # certificate is also valid for the new hostname. For example, if a - # connection has previously been established to foo.example.com at IP 1.2.3.4 - # with a certificate that is valid for `*.example.com`, then this connection - # could be used for requests to bar.example.com if that also resolved to - # 1.2.3.4. .. note:: By design, this feature will maximize reuse of - # connections. This means that instead opening a new connection when an - # existing connection reaches the maximum number of concurrent streams, - # requests will instead be sent to the existing connection. - # TODO(alyssawilk) implement request queueing in connections. .. note:: The - # coalesced connections might be to upstreams that would not be otherwise - # selected by Envoy. See the section `Connection Reuse in RFC 7540 - # `_ - allow_coalesced_connections: bool = betterproto.bool_field(3) - - -from ....common.dynamic_forward_proxy import v3 as ___common_dynamic_forward_proxy_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/clusters/redis/__init__.py b/src/envoy_data_plane/envoy/extensions/clusters/redis/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/clusters/redis/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/clusters/redis/v3/__init__.py deleted file mode 100644 index 7f14cf7..0000000 --- a/src/envoy_data_plane/envoy/extensions/clusters/redis/v3/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/clusters/redis/v3/redis_cluster.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RedisClusterConfig(betterproto.Message): - """[#next-free-field: 7]""" - - # Interval between successive topology refresh requests. If not set, this - # defaults to 5s. - cluster_refresh_rate: timedelta = betterproto.message_field(1) - # Timeout for topology refresh request. If not set, this defaults to 3s. 
- cluster_refresh_timeout: timedelta = betterproto.message_field(2) - # The minimum interval that must pass after triggering a topology refresh - # request before a new request can possibly be triggered again. Any errors - # received during one of these time intervals are ignored. If not set, this - # defaults to 5s. - redirect_refresh_interval: timedelta = betterproto.message_field(3) - # The number of redirection errors that must be received before triggering a - # topology refresh request. If not set, this defaults to 5. If this is set to - # 0, topology refresh after redirect is disabled. - redirect_refresh_threshold: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # The number of failures that must be received before triggering a topology - # refresh request. If not set, this defaults to 0, which disables the - # topology refresh due to failure. - failure_refresh_threshold: int = betterproto.uint32_field(5) - # The number of hosts became degraded or unhealthy before triggering a - # topology refresh request. If not set, this defaults to 0, which disables - # the topology refresh due to degraded or unhealthy host. - host_degraded_refresh_threshold: int = betterproto.uint32_field(6) diff --git a/src/envoy_data_plane/envoy/extensions/common/__init__.py b/src/envoy_data_plane/envoy/extensions/common/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/common/dynamic_forward_proxy/__init__.py b/src/envoy_data_plane/envoy/extensions/common/dynamic_forward_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/common/dynamic_forward_proxy/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/common/dynamic_forward_proxy/v3/__init__.py deleted file mode 100644 index a412f1c..0000000 --- a/src/envoy_data_plane/envoy/extensions/common/dynamic_forward_proxy/v3/__init__.py +++ /dev/null @@ -1,142 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class DnsCacheCircuitBreakers(betterproto.Message): - """Configuration of circuit breakers for resolver.""" - - # The maximum number of pending requests that Envoy will allow to the - # resolver. If not specified, the default is 1024. - max_pending_requests: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class DnsCacheConfig(betterproto.Message): - """ - Configuration for the dynamic forward proxy DNS cache. See the - :ref:`architecture overview ` for - more information. [#next-free-field: 14] - """ - - # The name of the cache. Multiple named caches allow independent dynamic - # forward proxy configurations to operate within a single Envoy process using - # different configurations. All configurations with the same name *must* - # otherwise have the same settings when referenced from different - # configuration components. Configuration will fail to load if this is not - # the case. - name: str = betterproto.string_field(1) - # The DNS lookup family to use during resolution. - # [#comment:TODO(mattklein123): Figure out how to support IPv4/IPv6 "happy - # eyeballs" mode. 
The way this might work is a new lookup family which - # returns both IPv4 and IPv6 addresses, and then configures a host to have a - # primary and fall back address. With this, we could very likely build a - # "happy eyeballs" connection pool which would race the primary / fall back - # address and return the one that wins. This same method could potentially - # also be used for QUIC to TCP fall back.] - dns_lookup_family: "____config_cluster_v3__.ClusterDnsLookupFamily" = ( - betterproto.enum_field(2) - ) - # The DNS refresh rate for unresolved DNS hosts. If not specified defaults to - # 60s. The refresh rate is rounded to the closest millisecond, and must be at - # least 1ms. Once a host has been resolved, the refresh rate will be the DNS - # TTL, capped at a minimum of 5s. - dns_refresh_rate: timedelta = betterproto.message_field(3) - # The TTL for hosts that are unused. Hosts that have not been used in the - # configured time interval will be purged. If not specified defaults to 5m. - # .. note: The TTL is only checked at the time of DNS refresh, as specified - # by *dns_refresh_rate*. This means that if the configured TTL is shorter - # than the refresh rate the host may not be removed immediately. .. note: - # The TTL has no relation to DNS TTL and is only used to control Envoy's - # resource usage. - host_ttl: timedelta = betterproto.message_field(4) - # The maximum number of hosts that the cache will hold. If not specified - # defaults to 1024. .. note: The implementation is approximate and enforced - # independently on each worker thread, thus it is possible for the maximum - # hosts in the cache to go slightly above the configured value depending on - # timing. This is similar to how other circuit breakers work. - max_hosts: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) - # If the DNS failure refresh rate is specified, this is used as the cache's - # DNS refresh rate when DNS requests are failing. If this setting is not - # specified, the failure refresh rate defaults to the dns_refresh_rate. - dns_failure_refresh_rate: "____config_cluster_v3__.ClusterRefreshRate" = ( - betterproto.message_field(6) - ) - # The config of circuit breakers for resolver. It provides a configurable - # threshold. Envoy will use dns cache circuit breakers with default settings - # even if this value is not set. - dns_cache_circuit_breaker: "DnsCacheCircuitBreakers" = betterproto.message_field(7) - # Always use TCP queries instead of UDP queries for DNS lookups. This field - # is deprecated in favor of *dns_resolution_config* which aggregates all of - # the DNS resolver configuration in a single message. - use_tcp_for_dns_lookups: bool = betterproto.bool_field(8) - # DNS resolution configuration which includes the underlying dns resolver - # addresses and options. This field is deprecated in favor of - # :ref:`typed_dns_resolver_config `. - dns_resolution_config: "____config_core_v3__.DnsResolutionConfig" = ( - betterproto.message_field(9) - ) - # DNS resolver type configuration extension. This extension can be used to - # configure c-ares, apple, or any other DNS resolver types and the related - # parameters. For example, an object of :ref:`CaresDnsResolverConfig ` - # can be packed into this *typed_dns_resolver_config*. This configuration - # replaces the :ref:`dns_resolution_config ` - # configuration. 
During the transition period when both - # *dns_resolution_config* and *typed_dns_resolver_config* exists, when - # *typed_dns_resolver_config* is in place, Envoy will use it and ignore - # *dns_resolution_config*. When *typed_dns_resolver_config* is missing, the - # default behavior is in place. [#extension-category: - # envoy.network.dns_resolver] - typed_dns_resolver_config: "____config_core_v3__.TypedExtensionConfig" = ( - betterproto.message_field(12) - ) - # Hostnames that should be preresolved into the cache upon creation. This - # might provide a performance improvement, in the form of cache hits, for - # hostnames that are going to be resolved during steady state and are known - # at config load time. - preresolve_hostnames: List[ - "____config_core_v3__.SocketAddress" - ] = betterproto.message_field(10) - # The timeout used for DNS queries. This timeout is independent of any - # timeout and retry policy used by the underlying DNS implementation (e.g., - # c-areas and Apple DNS) which are opaque. Setting this timeout will ensure - # that queries succeed or fail within the specified time frame and are then - # retried using the standard refresh rates. Defaults to 5s if not set. - dns_query_timeout: timedelta = betterproto.message_field(11) - # [#not-implemented-hide:] Configuration to flush the DNS cache to long term - # storage. - key_value_config: "____config_common_key_value_v3__.KeyValueStoreConfig" = ( - betterproto.message_field(13) - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.use_tcp_for_dns_lookups: - warnings.warn( - "DnsCacheConfig.use_tcp_for_dns_lookups is deprecated", - DeprecationWarning, - ) - if self.dns_resolution_config: - warnings.warn( - "DnsCacheConfig.dns_resolution_config is deprecated", DeprecationWarning - ) - - -from .....config.cluster import v3 as ____config_cluster_v3__ -from .....config.common.key_value import v3 as ____config_common_key_value_v3__ -from .....config.core import v3 as ____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/common/matching/__init__.py b/src/envoy_data_plane/envoy/extensions/common/matching/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/common/matching/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/common/matching/v3/__init__.py deleted file mode 100644 index fd7fca2..0000000 --- a/src/envoy_data_plane/envoy/extensions/common/matching/v3/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/common/matching/v3/extension_matcher.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ExtensionWithMatcher(betterproto.Message): - """ - Wrapper around an existing extension that provides an associated matcher. - This allows decorating an existing extension with a matcher, which can be - used to match against relevant protocol data. - """ - - # The associated matcher. This is deprecated in favor of xds_matcher. - matcher: "____config_common_matcher_v3__.Matcher" = betterproto.message_field(1) - # The associated matcher. - xds_matcher: "_____xds_type_matcher_v3__.Matcher" = betterproto.message_field(3) - # The underlying extension config. 
- extension_config: "____config_core_v3__.TypedExtensionConfig" = ( - betterproto.message_field(2) - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.matcher: - warnings.warn( - "ExtensionWithMatcher.matcher is deprecated", DeprecationWarning - ) - - -from ......xds.type.matcher import v3 as _____xds_type_matcher_v3__ -from .....config.common.matcher import v3 as ____config_common_matcher_v3__ -from .....config.core import v3 as ____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/common/ratelimit/__init__.py b/src/envoy_data_plane/envoy/extensions/common/ratelimit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/common/ratelimit/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/common/ratelimit/v3/__init__.py deleted file mode 100644 index 129b6b9..0000000 --- a/src/envoy_data_plane/envoy/extensions/common/ratelimit/v3/__init__.py +++ /dev/null @@ -1,77 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/common/ratelimit/v3/ratelimit.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RateLimitDescriptor(betterproto.Message): - """ - A RateLimitDescriptor is a list of hierarchical entries that are used by - the service to determine the final rate limit key and overall allowed - limit. Here are some examples of how they might be used for the domain - "envoy". .. code-block:: cpp ["authenticated": "false"], - ["remote_address": "10.0.0.1"] What it does: Limits all unauthenticated - traffic for the IP address 10.0.0.1. The configuration supplies a default - limit for the *remote_address* key. If there is a desire to raise the limit - for 10.0.0.1 or block it entirely it can be specified directly in the - configuration. .. code-block:: cpp ["authenticated": "false"], ["path": - "/foo/bar"] What it does: Limits all unauthenticated traffic globally for a - specific path (or prefix if configured that way in the service). .. code- - block:: cpp ["authenticated": "false"], ["path": "/foo/bar"], - ["remote_address": "10.0.0.1"] What it does: Limits unauthenticated traffic - to a specific path for a specific IP address. Like (1) we can raise/block - specific IP addresses if we want with an override configuration. .. code- - block:: cpp ["authenticated": "true"], ["client_id": "foo"] What it does: - Limits all traffic for an authenticated client "foo" .. code-block:: cpp - ["authenticated": "true"], ["client_id": "foo"], ["path": "/foo/bar"] What - it does: Limits traffic to a specific path for an authenticated client - "foo" The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent - in 1 request if desired. This enables building complex application - scenarios with a generic backend. Optionally the descriptor can contain a - limit override under a "limit" key, that specifies the number of requests - per unit to use instead of the number configured in the rate limiting - service. - """ - - # Descriptor entries. - entries: List["RateLimitDescriptorEntry"] = betterproto.message_field(1) - # Optional rate limit override to supply to the ratelimit service. - limit: "RateLimitDescriptorRateLimitOverride" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class RateLimitDescriptorEntry(betterproto.Message): - # Descriptor key. 
- key: str = betterproto.string_field(1) - # Descriptor value. - value: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class RateLimitDescriptorRateLimitOverride(betterproto.Message): - """ - Override rate limit to apply to this descriptor instead of the limit - configured in the rate limit service. See :ref:`rate limit override - ` for more information. - """ - - # The number of requests per unit of time. - requests_per_unit: int = betterproto.uint32_field(1) - # The unit of time. - unit: "____type_v3__.RateLimitUnit" = betterproto.enum_field(2) - - -@dataclass(eq=False, repr=False) -class LocalRateLimitDescriptor(betterproto.Message): - # Descriptor entries. - entries: List["RateLimitDescriptorEntry"] = betterproto.message_field(1) - # Token Bucket algorithm for local ratelimiting. - token_bucket: "____type_v3__.TokenBucket" = betterproto.message_field(2) - - -from .....type import v3 as ____type_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/common/tap/__init__.py b/src/envoy_data_plane/envoy/extensions/common/tap/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/common/tap/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/common/tap/v3/__init__.py deleted file mode 100644 index 77c494c..0000000 --- a/src/envoy_data_plane/envoy/extensions/common/tap/v3/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/common/tap/v3/common.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class CommonExtensionConfig(betterproto.Message): - """Common configuration for all tap extensions.""" - - # If specified, the tap filter will be configured via an admin handler. - admin_config: "AdminConfig" = betterproto.message_field(1, group="config_type") - # If specified, the tap filter will be configured via a static configuration - # that cannot be changed. - static_config: "____config_tap_v3__.TapConfig" = betterproto.message_field( - 2, group="config_type" - ) - - -@dataclass(eq=False, repr=False) -class AdminConfig(betterproto.Message): - """ - Configuration for the admin handler. See :ref:`here - ` for more information. - """ - - # Opaque configuration ID. When requests are made to the admin handler, the - # passed opaque ID is matched to the configured filter opaque ID to determine - # which filter to configure. 
- config_id: str = betterproto.string_field(1) - - -from .....config.tap import v3 as ____config_tap_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/compression/__init__.py b/src/envoy_data_plane/envoy/extensions/compression/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/compression/brotli/__init__.py b/src/envoy_data_plane/envoy/extensions/compression/brotli/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/compression/brotli/compressor/__init__.py b/src/envoy_data_plane/envoy/extensions/compression/brotli/compressor/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/compression/brotli/compressor/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/compression/brotli/compressor/v3/__init__.py deleted file mode 100644 index 9e27536..0000000 --- a/src/envoy_data_plane/envoy/extensions/compression/brotli/compressor/v3/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/compression/brotli/compressor/v3/brotli.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class BrotliEncoderMode(betterproto.Enum): - DEFAULT = 0 - GENERIC = 1 - TEXT = 2 - FONT = 3 - - -@dataclass(eq=False, repr=False) -class Brotli(betterproto.Message): - """[#next-free-field: 7]""" - - # Value from 0 to 11 that controls the main compression speed-density lever. - # The higher quality, the slower compression. The default value is 3. - quality: Optional[int] = betterproto.message_field(1, wraps=betterproto.TYPE_UINT32) - # A value used to tune encoder for specific input. For more information about - # modes, please refer to brotli manual: https://brotli.org/encode.html#aa6f - # This field will be set to "DEFAULT" if not specified. - encoder_mode: "BrotliEncoderMode" = betterproto.enum_field(2) - # Value from 10 to 24 that represents the base two logarithmic of the - # compressor's window size. Larger window results in better compression at - # the expense of memory usage. The default is 18. For more details about this - # parameter, please refer to brotli manual: - # https://brotli.org/encode.html#a9a8 - window_bits: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - # Value from 16 to 24 that represents the base two logarithmic of the - # compressor's input block size. Larger input block results in better - # compression at the expense of memory usage. The default is 24. For more - # details about this parameter, please refer to brotli manual: - # https://brotli.org/encode.html#a9a8 - input_block_bits: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # Value for compressor's next output buffer. If not set, defaults to 4096. - chunk_size: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) - # If true, disables "literal context modeling" format feature. This flag is a - # "decoding-speed vs compression ratio" trade-off. 
- disable_literal_context_modeling: bool = betterproto.bool_field(6) diff --git a/src/envoy_data_plane/envoy/extensions/compression/brotli/decompressor/__init__.py b/src/envoy_data_plane/envoy/extensions/compression/brotli/decompressor/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/compression/brotli/decompressor/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/compression/brotli/decompressor/v3/__init__.py deleted file mode 100644 index 0cee587..0000000 --- a/src/envoy_data_plane/envoy/extensions/compression/brotli/decompressor/v3/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/compression/brotli/decompressor/v3/brotli.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Brotli(betterproto.Message): - # If true, disables "canny" ring buffer allocation strategy. Ring buffer is - # allocated according to window size, despite the real size of the content. - disable_ring_buffer_reallocation: bool = betterproto.bool_field(1) - # Value for decompressor's next output buffer. If not set, defaults to 4096. - chunk_size: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) diff --git a/src/envoy_data_plane/envoy/extensions/compression/gzip/__init__.py b/src/envoy_data_plane/envoy/extensions/compression/gzip/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/compression/gzip/compressor/__init__.py b/src/envoy_data_plane/envoy/extensions/compression/gzip/compressor/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/compression/gzip/compressor/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/compression/gzip/compressor/v3/__init__.py deleted file mode 100644 index 90d15fc..0000000 --- a/src/envoy_data_plane/envoy/extensions/compression/gzip/compressor/v3/__init__.py +++ /dev/null @@ -1,77 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/compression/gzip/compressor/v3/gzip.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class GzipCompressionStrategy(betterproto.Enum): - DEFAULT_STRATEGY = 0 - FILTERED = 1 - HUFFMAN_ONLY = 2 - RLE = 3 - FIXED = 4 - - -class GzipCompressionLevel(betterproto.Enum): - DEFAULT_COMPRESSION = 0 - BEST_SPEED = 1 - COMPRESSION_LEVEL_1 = 1 - COMPRESSION_LEVEL_2 = 2 - COMPRESSION_LEVEL_3 = 3 - COMPRESSION_LEVEL_4 = 4 - COMPRESSION_LEVEL_5 = 5 - COMPRESSION_LEVEL_6 = 6 - COMPRESSION_LEVEL_7 = 7 - COMPRESSION_LEVEL_8 = 8 - COMPRESSION_LEVEL_9 = 9 - BEST_COMPRESSION = 9 - - -@dataclass(eq=False, repr=False) -class Gzip(betterproto.Message): - """[#next-free-field: 6]""" - - # Value from 1 to 9 that controls the amount of internal memory used by zlib. - # Higher values use more memory, but are faster and produce better - # compression results. The default value is 5. - memory_level: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # A value used for selecting the zlib compression level. This setting will - # affect speed and amount of compression applied to the content. 
- # "BEST_COMPRESSION" provides higher compression at the cost of higher - # latency and is equal to "COMPRESSION_LEVEL_9". "BEST_SPEED" provides lower - # compression with minimum impact on response time, the same as - # "COMPRESSION_LEVEL_1". "DEFAULT_COMPRESSION" provides an optimal result - # between speed and compression. According to zlib's manual this level gives - # the same result as "COMPRESSION_LEVEL_6". This field will be set to - # "DEFAULT_COMPRESSION" if not specified. - compression_level: "GzipCompressionLevel" = betterproto.enum_field(2) - # A value used for selecting the zlib compression strategy which is directly - # related to the characteristics of the content. Most of the time - # "DEFAULT_STRATEGY" will be the best choice, which is also the default value - # for the parameter, though there are situations when changing this parameter - # might produce better results. For example, run-length encoding (RLE) is - # typically used when the content is known for having sequences which same - # data occurs many consecutive times. For more information about each - # strategy, please refer to zlib manual. - compression_strategy: "GzipCompressionStrategy" = betterproto.enum_field(3) - # Value from 9 to 15 that represents the base two logarithmic of the - # compressor's window size. Larger window results in better compression at - # the expense of memory usage. The default is 12 which will produce a 4096 - # bytes window. For more details about this parameter, please refer to zlib - # manual > deflateInit2. - window_bits: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # Value for Zlib's next output buffer. If not set, defaults to 4096. See - # https://www.zlib.net/manual.html for more details. Also see - # https://github.com/envoyproxy/envoy/issues/8448 for context on this - # filter's performance. - chunk_size: Optional[int] = betterproto.message_field( - 5, wraps=betterproto.TYPE_UINT32 - ) diff --git a/src/envoy_data_plane/envoy/extensions/compression/gzip/decompressor/__init__.py b/src/envoy_data_plane/envoy/extensions/compression/gzip/decompressor/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/compression/gzip/decompressor/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/compression/gzip/decompressor/v3/__init__.py deleted file mode 100644 index 272b2e5..0000000 --- a/src/envoy_data_plane/envoy/extensions/compression/gzip/decompressor/v3/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/compression/gzip/decompressor/v3/gzip.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Gzip(betterproto.Message): - # Value from 9 to 15 that represents the base two logarithmic of the - # decompressor's window size. The decompression window size needs to be equal - # or larger than the compression window size. The default window size is 15. - # This is so that the decompressor can decompress a response compressed by a - # compressor with any compression window size. For more details about this - # parameter, please refer to `zlib manual - # `_ > inflateInit2. - window_bits: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # Value for zlib's decompressor output buffer. If not set, defaults to 4096. 
- # See https://www.zlib.net/manual.html for more details. - chunk_size: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) diff --git a/src/envoy_data_plane/envoy/extensions/filters/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/common/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/common/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/common/dependency/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/common/dependency/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/common/dependency/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/common/dependency/v3/__init__.py deleted file mode 100644 index 90f11dc..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/common/dependency/v3/__init__.py +++ /dev/null @@ -1,63 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/common/dependency/v3/dependency.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class DependencyDependencyType(betterproto.Enum): - HEADER = 0 - FILTER_STATE_KEY = 1 - DYNAMIC_METADATA = 2 - - -@dataclass(eq=False, repr=False) -class Dependency(betterproto.Message): - """Dependency specification and string identifier.""" - - # The kind of dependency. - type: "DependencyDependencyType" = betterproto.enum_field(1) - # The string identifier for the dependency. - name: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class FilterDependencies(betterproto.Message): - """ - Dependency specification for a filter. For a filter chain to be valid, any - dependency that is required must be provided by an earlier filter. - """ - - # A list of dependencies required on the decode path. - decode_required: List["Dependency"] = betterproto.message_field(1) - # A list of dependencies provided on the encode path. - decode_provided: List["Dependency"] = betterproto.message_field(2) - # A list of dependencies required on the decode path. - encode_required: List["Dependency"] = betterproto.message_field(3) - # A list of dependencies provided on the encode path. - encode_provided: List["Dependency"] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class MatchingRequirements(betterproto.Message): - """ - Matching requirements for a filter. For a match tree to be used with a - filter, the match requirements must be satisfied. This protobuf is provided - by the filter implementation as a way to communicate the matching - requirements to the filter factories, allowing for config rejection if the - requirements are not satisfied. - """ - - data_input_allow_list: "MatchingRequirementsDataInputAllowList" = ( - betterproto.message_field(1) - ) - - -@dataclass(eq=False, repr=False) -class MatchingRequirementsDataInputAllowList(betterproto.Message): - # An explicit list of data inputs that are allowed to be used with this - # filter. 
- type_url: List[str] = betterproto.string_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/filters/common/fault/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/common/fault/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/common/fault/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/common/fault/v3/__init__.py deleted file mode 100644 index 154f0e1..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/common/fault/v3/__init__.py +++ /dev/null @@ -1,83 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/common/fault/v3/fault.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class FaultDelayFaultDelayType(betterproto.Enum): - FIXED = 0 - - -@dataclass(eq=False, repr=False) -class FaultDelay(betterproto.Message): - """ - Delay specification is used to inject latency into the HTTP/Mongo - operation. [#next-free-field: 6] - """ - - # Add a fixed delay before forwarding the operation upstream. See - # https://developers.google.com/protocol-buffers/docs/proto3#json for the - # JSON/YAML Duration mapping. For HTTP/Mongo, the specified delay will be - # injected before a new request/operation. This is required if type is FIXED. - fixed_delay: timedelta = betterproto.message_field(3, group="fault_delay_secifier") - # Fault delays are controlled via an HTTP header (if applicable). - header_delay: "FaultDelayHeaderDelay" = betterproto.message_field( - 5, group="fault_delay_secifier" - ) - # The percentage of operations/connections/requests on which the delay will - # be injected. - percentage: "_____type_v3__.FractionalPercent" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class FaultDelayHeaderDelay(betterproto.Message): - """ - Fault delays are controlled via an HTTP header (if applicable). See the - :ref:`HTTP fault filter ` - documentation for more information. - """ - - pass - - -@dataclass(eq=False, repr=False) -class FaultRateLimit(betterproto.Message): - """Describes a rate limit to be applied.""" - - # A fixed rate limit. - fixed_limit: "FaultRateLimitFixedLimit" = betterproto.message_field( - 1, group="limit_type" - ) - # Rate limits are controlled via an HTTP header (if applicable). - header_limit: "FaultRateLimitHeaderLimit" = betterproto.message_field( - 3, group="limit_type" - ) - # The percentage of operations/connections/requests on which the rate limit - # will be injected. - percentage: "_____type_v3__.FractionalPercent" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class FaultRateLimitFixedLimit(betterproto.Message): - """Describes a fixed/constant rate limit.""" - - # The limit supplied in KiB/s. - limit_kbps: int = betterproto.uint64_field(1) - - -@dataclass(eq=False, repr=False) -class FaultRateLimitHeaderLimit(betterproto.Message): - """ - Rate limits are controlled via an HTTP header (if applicable). See the - :ref:`HTTP fault filter ` - documentation for more information. 
- """ - - pass - - -from ......type import v3 as _____type_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/common/matcher/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/common/matcher/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/common/matcher/action/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/common/matcher/action/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/common/matcher/action/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/common/matcher/action/v3/__init__.py deleted file mode 100644 index 0412f6c..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/common/matcher/action/v3/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/common/matcher/action/v3/skip_action.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class SkipFilter(betterproto.Message): - """ - Configuration for the SkipFilter match action. When matching results in - this action, the associated filter will be ignored for all filter callbacks - (e.g. `encodeHeaders`, `encodeData`, etc. for HTTP filters) after the - matcher arrives at the match, including the callback that caused the match - result. For example, when used with a HTTP filter and the match result was - resolved after receiving the HTTP response headers, the HTTP filter will - *not* receive the response header callback. As a result, if this match - action is resolved before the first filter callback (e.g. HTTP request - headers), the filter will be completely skipped. - """ - - pass diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/adaptive_concurrency/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/adaptive_concurrency/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/adaptive_concurrency/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/adaptive_concurrency/v3/__init__.py deleted file mode 100644 index 9865155..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/adaptive_concurrency/v3/__init__.py +++ /dev/null @@ -1,85 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class GradientControllerConfig(betterproto.Message): - """Configuration parameters for the gradient controller.""" - - # The percentile to use when summarizing aggregated samples. Defaults to p50. 
- sample_aggregate_percentile: "_____type_v3__.Percent" = betterproto.message_field(1) - concurrency_limit_params: "GradientControllerConfigConcurrencyLimitCalculationParams" = betterproto.message_field( - 2 - ) - min_rtt_calc_params: "GradientControllerConfigMinimumRttCalculationParams" = ( - betterproto.message_field(3) - ) - - -@dataclass(eq=False, repr=False) -class GradientControllerConfigConcurrencyLimitCalculationParams(betterproto.Message): - """ - Parameters controlling the periodic recalculation of the concurrency limit - from sampled request latencies. - """ - - # The allowed upper-bound on the calculated concurrency limit. Defaults to - # 1000. - max_concurrency_limit: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # The period of time samples are taken to recalculate the concurrency limit. - concurrency_update_interval: timedelta = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class GradientControllerConfigMinimumRttCalculationParams(betterproto.Message): - """ - Parameters controlling the periodic minRTT recalculation. [#next-free- - field: 6] - """ - - # The time interval between recalculating the minimum request round-trip - # time. Has to be positive. - interval: timedelta = betterproto.message_field(1) - # The number of requests to aggregate/sample during the minRTT recalculation - # window before updating. Defaults to 50. - request_count: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # Randomized time delta that will be introduced to the start of the minRTT - # calculation window. This is represented as a percentage of the interval - # duration. Defaults to 15%. Example: If the interval is 10s and the jitter - # is 15%, the next window will begin somewhere in the range (10s - 11.5s). - jitter: "_____type_v3__.Percent" = betterproto.message_field(3) - # The concurrency limit set while measuring the minRTT. Defaults to 3. - min_concurrency: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - # Amount added to the measured minRTT to add stability to the concurrency - # limit during natural variability in latency. This is expressed as a - # percentage of the measured value and can be adjusted to allow more or less - # tolerance to the sampled latency values. Defaults to 25%. - buffer: "_____type_v3__.Percent" = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class AdaptiveConcurrency(betterproto.Message): - # Gradient concurrency control will be used. - gradient_controller_config: "GradientControllerConfig" = betterproto.message_field( - 1, group="concurrency_controller_config" - ) - # If set to false, the adaptive concurrency filter will operate as a pass- - # through filter. If the message is unspecified, the filter will be enabled. 
- enabled: "_____config_core_v3__.RuntimeFeatureFlag" = betterproto.message_field(2) - - -from ......config.core import v3 as _____config_core_v3__ -from ......type import v3 as _____type_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/admission_control/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/admission_control/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/admission_control/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/admission_control/v3/__init__.py deleted file mode 100644 index bf10058..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/admission_control/v3/__init__.py +++ /dev/null @@ -1,97 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/admission_control/v3/admission_control.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class AdmissionControl(betterproto.Message): - """[#next-free-field: 8]""" - - # If set to false, the admission control filter will operate as a pass- - # through filter. If the message is unspecified, the filter will be enabled. - enabled: "_____config_core_v3__.RuntimeFeatureFlag" = betterproto.message_field(1) - success_criteria: "AdmissionControlSuccessCriteria" = betterproto.message_field( - 2, group="evaluation_criteria" - ) - # The sliding time window over which the success rate is calculated. The - # window is rounded to the nearest second. Defaults to 30s. - sampling_window: timedelta = betterproto.message_field(3) - # Rejection probability is defined by the formula:: max(0, (rq_count - - # rq_success_count / sr_threshold) / (rq_count + 1)) ^ (1 / aggression) The - # aggression dictates how heavily the admission controller will throttle - # requests upon SR dropping at or below the threshold. A value of 1 will - # result in a linear increase in rejection probability as SR drops. Any - # values less than 1.0, will be set to 1.0. If the message is unspecified, - # the aggression is 1.0. See `the admission control documentation `_ for a diagram illustrating this. - aggression: "_____config_core_v3__.RuntimeDouble" = betterproto.message_field(4) - # Dictates the success rate at which the rejection probability is non-zero. - # As success rate drops below this threshold, rejection probability will - # increase. Any success rate above the threshold results in a rejection - # probability of 0. Defaults to 95%. - sr_threshold: "_____config_core_v3__.RuntimePercent" = betterproto.message_field(5) - # If the average RPS of the sampling window is below this threshold, the - # request will not be rejected, even if the success rate is lower than - # sr_threshold. Defaults to 0. - rps_threshold: "_____config_core_v3__.RuntimeUInt32" = betterproto.message_field(6) - # The probability of rejection will never exceed this value, even if the - # failure rate is rising. Defaults to 80%. - max_rejection_probability: "_____config_core_v3__.RuntimePercent" = ( - betterproto.message_field(7) - ) - - -@dataclass(eq=False, repr=False) -class AdmissionControlSuccessCriteria(betterproto.Message): - """ - Default method of specifying what constitutes a successful request. 
All - status codes that indicate a successful request must be explicitly - specified if not relying on the default values. - """ - - # If HTTP criteria are unspecified, all HTTP status codes below 500 are - # treated as successful responses. .. note:: The default HTTP codes - # considered successful by the admission controller are done so due to the - # unlikelihood that sending fewer requests would change their behavior (for - # example: redirects, unauthorized access, or bad requests won't be - # alleviated by sending less traffic). - http_criteria: "AdmissionControlSuccessCriteriaHttpCriteria" = ( - betterproto.message_field(1) - ) - # GRPC status codes to consider as request successes. If unspecified, - # defaults to: Ok, Cancelled, Unknown, InvalidArgument, NotFound, - # AlreadyExists, Unauthenticated, FailedPrecondition, OutOfRange, - # PermissionDenied, and Unimplemented. .. note:: The default gRPC codes - # that are considered successful by the admission controller are chosen - # because of the unlikelihood that sending fewer requests will change the - # behavior. - grpc_criteria: "AdmissionControlSuccessCriteriaGrpcCriteria" = ( - betterproto.message_field(2) - ) - - -@dataclass(eq=False, repr=False) -class AdmissionControlSuccessCriteriaHttpCriteria(betterproto.Message): - # Status code ranges that constitute a successful request. Configurable codes - # are in the range [100, 600). - http_success_status: List["_____type_v3__.Int32Range"] = betterproto.message_field( - 1 - ) - - -@dataclass(eq=False, repr=False) -class AdmissionControlSuccessCriteriaGrpcCriteria(betterproto.Message): - # Status codes that constitute a successful request. Mappings can be found - # at: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. - grpc_success_status: List[int] = betterproto.uint32_field(1) - - -from ......config.core import v3 as _____config_core_v3__ -from ......type import v3 as _____type_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/alternate_protocols_cache/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/alternate_protocols_cache/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/alternate_protocols_cache/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/alternate_protocols_cache/v3/__init__.py deleted file mode 100644 index b5a6547..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/alternate_protocols_cache/v3/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FilterConfig(betterproto.Message): - """ - Configuration for the alternate protocols cache HTTP filter. [#extension: - envoy.filters.http.alternate_protocols_cache] - """ - - # If set, causes the use of the alternate protocols cache, which is - # responsible for parsing and caching HTTP Alt-Svc headers. This enables the - # use of HTTP/3 for upstream servers that advertise supporting it. - # TODO(RyanTheOptimist): Make this field required when HTTP/3 is enabled via - # auto_http. 
- alternate_protocols_cache_options: "_____config_core_v3__.AlternateProtocolsCacheOptions" = betterproto.message_field( - 1 - ) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/aws_lambda/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/aws_lambda/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/aws_lambda/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/aws_lambda/v3/__init__.py deleted file mode 100644 index 574a551..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/aws_lambda/v3/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class ConfigInvocationMode(betterproto.Enum): - SYNCHRONOUS = 0 - ASYNCHRONOUS = 1 - - -@dataclass(eq=False, repr=False) -class Config(betterproto.Message): - """AWS Lambda filter config""" - - # The ARN of the AWS Lambda to invoke when the filter is engaged Must be in - # the following format: arn::lambda:::function: - arn: str = betterproto.string_field(1) - # Whether to transform the request (headers and body) to a JSON payload or - # pass it as is. - payload_passthrough: bool = betterproto.bool_field(2) - # Determines the way to invoke the Lambda function. - invocation_mode: "ConfigInvocationMode" = betterproto.enum_field(3) - - -@dataclass(eq=False, repr=False) -class PerRouteConfig(betterproto.Message): - """ - Per-route configuration for AWS Lambda. This can be useful when invoking a - different Lambda function or a different version of the same Lambda - depending on the route. - """ - - invoke_config: "Config" = betterproto.message_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/aws_request_signing/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/aws_request_signing/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/aws_request_signing/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/aws_request_signing/v3/__init__.py deleted file mode 100644 index 3594841..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/aws_request_signing/v3/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class AwsRequestSigning(betterproto.Message): - """ - Top level configuration for the AWS request signing filter. [#next-free- - field: 6] - """ - - # The `service namespace `_ of the HTTP - # endpoint. Example: s3 - service_name: str = betterproto.string_field(1) - # The `region `_ - # hosting the HTTP endpoint. Example: us-west-2 - region: str = betterproto.string_field(2) - # Indicates that before signing headers, the host header will be swapped with - # this value. If not set or empty, the original host header value will be - # used and no rewrite will happen. 
Note: this rewrite affects both signing - # and host header forwarding. However, this option shouldn't be used with - # :ref:`HCM host rewrite - # ` - # given that the value set here would be used for signing whereas the value - # set in the HCM would be used for host header forwarding which is not the - # desired outcome. - host_rewrite: str = betterproto.string_field(3) - # Instead of buffering the request to calculate the payload hash, use the - # literal string ``UNSIGNED-PAYLOAD`` to calculate the payload hash. Not all - # services support this option. See the `S3 - # `_ policy for details. - use_unsigned_payload: bool = betterproto.bool_field(4) - # A list of request header string matchers that will be excluded from - # signing. The excluded header can be matched by any patterns defined in the - # StringMatcher proto (e.g. exact string, prefix, regex, etc). Example: - # match_excluded_headers: - prefix: x-envoy - exact: foo - exact: bar When - # applied, all headers that start with "x-envoy" and headers "foo" and "bar" - # will not be signed. - match_excluded_headers: List[ - "_____type_matcher_v3__.StringMatcher" - ] = betterproto.message_field(5) - - -from ......type.matcher import v3 as _____type_matcher_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/bandwidth_limit/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/bandwidth_limit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/bandwidth_limit/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/bandwidth_limit/v3/__init__.py deleted file mode 100644 index 96ff22d..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/bandwidth_limit/v3/__init__.py +++ /dev/null @@ -1,58 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class BandwidthLimitEnableMode(betterproto.Enum): - DISABLED = 0 - REQUEST = 1 - RESPONSE = 2 - REQUEST_AND_RESPONSE = 3 - - -@dataclass(eq=False, repr=False) -class BandwidthLimit(betterproto.Message): - """[#next-free-field: 8]""" - - # The human readable prefix to use when emitting stats. - stat_prefix: str = betterproto.string_field(1) - # The enable mode for the bandwidth limit filter. Default is Disabled. - enable_mode: "BandwidthLimitEnableMode" = betterproto.enum_field(2) - # The limit supplied in KiB/s. .. note:: It's fine for the limit to be - # unset for the global configuration since the bandwidth limit can be - # applied at a the virtual host or route level. Thus, the limit must be set - # for the per route configuration otherwise the config will be rejected. .. - # note:: When using per route configuration, the limit becomes unique to - # that route. - limit_kbps: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT64 - ) - # Optional Fill interval in milliseconds for the token refills. Defaults to - # 50ms. It must be at least 20ms to avoid too aggressive refills. - fill_interval: timedelta = betterproto.message_field(4) - # Runtime flag that controls whether the filter is enabled or not. If not - # specified, defaults to enabled. 
- runtime_enabled: "_____config_core_v3__.RuntimeFeatureFlag" = ( - betterproto.message_field(5) - ) - # Enable response trailers. .. note:: * If set true, the response trailers - # *bandwidth-request-delay-ms* and *bandwidth-response-delay-ms* will be - # added, prefixed by *response_trailer_prefix*. * bandwidth-request-delay- - # ms: delay time in milliseconds it took for the request stream transfer. * - # bandwidth-response-delay-ms: delay time in milliseconds it took for the - # response stream transfer. * If :ref:`enable_mode ` is - # DISABLED or REQUEST, the trailers will not be set. * If both the request - # and response delay time is 0, the trailers will not be set. - enable_response_trailers: bool = betterproto.bool_field(6) - # Optional The prefix for the response trailers. - response_trailer_prefix: str = betterproto.string_field(7) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/buffer/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/buffer/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/buffer/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/buffer/v3/__init__.py deleted file mode 100644 index 09d22ca..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/buffer/v3/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/buffer/v3/buffer.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Buffer(betterproto.Message): - # The maximum request size that the filter will buffer before the connection - # manager will stop buffering and return a 413 response. - max_request_bytes: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class BufferPerRoute(betterproto.Message): - # Disable the buffer filter for this particular vhost or route. - disabled: bool = betterproto.bool_field(1, group="override") - # Override the global configuration of the filter with this new config. - buffer: "Buffer" = betterproto.message_field(2, group="override") diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/cache/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/cache/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/cache/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/cache/v3/__init__.py deleted file mode 100644 index 42506e1..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/cache/v3/__init__.py +++ /dev/null @@ -1,70 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/cache/v3/cache.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class CacheConfig(betterproto.Message): - """[#extension: envoy.filters.http.cache]""" - - # Config specific to the cache storage implementation. 
[#extension-category: - # envoy.filters.http.cache] - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(1) - # List of matching rules that defines allowed *Vary* headers. The *vary* - # response header holds a list of header names that affect the contents of a - # response, as described by - # https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. During - # insertion, *allowed_vary_headers* acts as a allowlist: if a response's - # *vary* header mentions any header names that aren't matched by any rules in - # *allowed_vary_headers*, that response will not be cached. During lookup, - # *allowed_vary_headers* controls what request headers will be sent to the - # cache storage implementation. - allowed_vary_headers: List[ - "_____type_matcher_v3__.StringMatcher" - ] = betterproto.message_field(2) - # [#not-implemented-hide:] - # Modifies cache key creation by restricting which parts of the URL are - # included. - key_creator_params: "CacheConfigKeyCreatorParams" = betterproto.message_field(3) - # [#not-implemented-hide:] Max body - # size the cache filter will insert into a cache. 0 means unlimited (though - # the cache storage implementation may have its own limit beyond which it - # will reject insertions). - max_body_bytes: int = betterproto.uint32_field(4) - - -@dataclass(eq=False, repr=False) -class CacheConfigKeyCreatorParams(betterproto.Message): - """ - [#not-implemented-hide:] Modifies cache key creation by restricting which - parts of the URL are included. - """ - - # If true, exclude the URL scheme from the cache key. Set to true if your - # origins always produce the same response for http and https requests. - exclude_scheme: bool = betterproto.bool_field(1) - # If true, exclude the host from the cache key. Set to true if your origins' - # responses don't ever depend on host. - exclude_host: bool = betterproto.bool_field(2) - # If *query_parameters_included* is nonempty, only query parameters matched - # by one or more of its matchers are included in the cache key. Any other - # query params will not affect cache lookup. - query_parameters_included: List[ - "_____config_route_v3__.QueryParameterMatcher" - ] = betterproto.message_field(3) - # If *query_parameters_excluded* is nonempty, query parameters matched by one - # or more of its matchers are excluded from the cache key (even if also - # matched by *query_parameters_included*), and will not affect cache lookup. - query_parameters_excluded: List[ - "_____config_route_v3__.QueryParameterMatcher" - ] = betterproto.message_field(4) - - -from ......config.route import v3 as _____config_route_v3__ -from ......type.matcher import v3 as _____type_matcher_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/cdn_loop/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/cdn_loop/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/cdn_loop/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/cdn_loop/v3/__init__.py deleted file mode 100644 index 916d1f7..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/cdn_loop/v3/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
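# --- Editor's note (illustrative sketch, not part of the deleted file): constructing
# the BandwidthLimit message shown above, assuming the regenerated 1.24.0 bindings
# keep the same module layout as the deleted sources; the values are hypothetical.
from datetime import timedelta

from envoy_data_plane.envoy.extensions.filters.http.bandwidth_limit.v3 import (
    BandwidthLimit,
    BandwidthLimitEnableMode,
)

bw_limit = BandwidthLimit(
    stat_prefix="bw",                                           # stats emitted under bw.*
    enable_mode=BandwidthLimitEnableMode.REQUEST_AND_RESPONSE,  # limit both directions
    limit_kbps=1024,                                            # KiB/s, wrapped uint64
    fill_interval=timedelta(milliseconds=50),                   # token-bucket refill period
)
print(bw_limit.to_dict())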
-# sources: envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class CdnLoopConfig(betterproto.Message): - """ - CDN-Loop Header filter config. See the :ref:`configuration overview - ` for more information. - """ - - # The CDN identifier to use for loop checks and to append to the CDN-Loop - # header. RFC 8586 calls this the cdn-id. The cdn-id can either be a - # pseudonym or hostname the CDN is in control of. cdn_id must not be empty. - cdn_id: str = betterproto.string_field(1) - # The maximum allowed count of cdn_id in the downstream CDN-Loop request - # header. The default of 0 means a request can transit the CdnLoopFilter - # once. A value of 1 means that a request can transit the CdnLoopFilter twice - # and so on. - max_allowed_occurrences: int = betterproto.uint32_field(2) diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/composite/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/composite/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/composite/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/composite/v3/__init__.py deleted file mode 100644 index 1fd3598..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/composite/v3/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/composite/v3/composite.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Composite(betterproto.Message): - """ - :ref:`Composite filter ` config. The - composite filter config allows delegating filter handling to another filter - as determined by matching on the request headers. This makes it possible to - use different filters or filter configurations based on the incoming - request. This is intended to be used with :ref:`ExtensionWithMatcher - ` - where a match tree is specified that indicates (via - :ref:`ExecuteFilterAction `) which filter configuration to create and - delegate to. - """ - - pass - - -@dataclass(eq=False, repr=False) -class ExecuteFilterAction(betterproto.Message): - """ - Composite match action (see :ref:`matching docs - ` for more info on match actions). This - specifies the filter configuration of the filter that the composite filter - should delegate filter interactions to. - """ - - typed_config: "_____config_core_v3__.TypedExtensionConfig" = ( - betterproto.message_field(1) - ) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/compressor/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/compressor/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/compressor/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/compressor/v3/__init__.py deleted file mode 100644 index b8c497c..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/compressor/v3/__init__.py +++ /dev/null @@ -1,136 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
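# --- Editor's note (illustrative sketch, not part of the deleted file): building the
# CdnLoopConfig described above and round-tripping it through the protobuf wire
# format, assuming the regenerated 1.24.0 bindings keep this module path; the cdn-id
# value is hypothetical.
from envoy_data_plane.envoy.extensions.filters.http.cdn_loop.v3 import CdnLoopConfig

cdn_loop = CdnLoopConfig(
    cdn_id="cdn.example.com",   # appended to the CDN-Loop header (RFC 8586 cdn-id)
    max_allowed_occurrences=0,  # 0 => a request may transit this filter once
)
restored = CdnLoopConfig().parse(bytes(cdn_loop))  # serialize, then parse back
assert restored.cdn_id == "cdn.example.com"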
-# sources: envoy/extensions/filters/http/compressor/v3/compressor.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Compressor(betterproto.Message): - """[#next-free-field: 9]""" - - # Minimum response length, in bytes, which will trigger compression. The - # default value is 30. - content_length: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # Set of strings that allows specifying which mime-types yield compression; - # e.g., application/json, text/html, etc. When this field is not defined, - # compression will be applied to the following mime-types: - # "application/javascript", "application/json", "application/xhtml+xml", - # "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml" and - # their synonyms. - content_type: List[str] = betterproto.string_field(2) - # If true, disables compression when the response contains an etag header. - # When it is false, the filter will preserve weak etags and remove the ones - # that require strong validation. - disable_on_etag_header: bool = betterproto.bool_field(3) - # If true, removes accept-encoding from the request headers before - # dispatching it to the upstream so that responses do not get compressed - # before reaching the filter. .. attention:: To avoid interfering with - # other compression filters in the same chain use this option in the - # filter closest to the upstream. - remove_accept_encoding_header: bool = betterproto.bool_field(4) - # Runtime flag that controls whether the filter is enabled or not. If set to - # false, the filter will operate as a pass-through filter. If not specified, - # defaults to enabled. - runtime_enabled: "_____config_core_v3__.RuntimeFeatureFlag" = ( - betterproto.message_field(5) - ) - # A compressor library to use for compression. Currently only :ref:`envoy.com - # pression.gzip.compressor` is included in Envoy. [#extension-category: - # envoy.compression.compressor] - compressor_library: "_____config_core_v3__.TypedExtensionConfig" = ( - betterproto.message_field(6) - ) - # Configuration for request compression. Compression is disabled by default - # if left empty. - request_direction_config: "CompressorRequestDirectionConfig" = ( - betterproto.message_field(7) - ) - # Configuration for response compression. Compression is enabled by default - # if left empty. .. attention:: If the field is not empty then the - # duplicate deprecated fields of the `Compressor` message, such as - # `content_length`, `content_type`, `disable_on_etag_header`, - # `remove_accept_encoding_header` and `runtime_enabled`, are ignored. Also - # all the statistics related to response compression will be rooted in `.compressor...response.*` instead of `.compressor...*`. 
- response_direction_config: "CompressorResponseDirectionConfig" = ( - betterproto.message_field(8) - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.content_length: - warnings.warn("Compressor.content_length is deprecated", DeprecationWarning) - if self.content_type: - warnings.warn("Compressor.content_type is deprecated", DeprecationWarning) - if self.disable_on_etag_header: - warnings.warn( - "Compressor.disable_on_etag_header is deprecated", DeprecationWarning - ) - if self.remove_accept_encoding_header: - warnings.warn( - "Compressor.remove_accept_encoding_header is deprecated", - DeprecationWarning, - ) - if self.runtime_enabled: - warnings.warn( - "Compressor.runtime_enabled is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class CompressorCommonDirectionConfig(betterproto.Message): - # Runtime flag that controls whether compression is enabled or not for the - # direction this common config is put in. If set to false, the filter will - # operate as a pass-through filter in the chosen direction. If the field is - # omitted, the filter will be enabled. - enabled: "_____config_core_v3__.RuntimeFeatureFlag" = betterproto.message_field(1) - # Minimum value of Content-Length header of request or response messages - # (depending on the direction this common config is put in), in bytes, which - # will trigger compression. The default value is 30. - min_content_length: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # Set of strings that allows specifying which mime-types yield compression; - # e.g., application/json, text/html, etc. When this field is not defined, - # compression will be applied to the following mime-types: - # "application/javascript", "application/json", "application/xhtml+xml", - # "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml" and - # their synonyms. - content_type: List[str] = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class CompressorRequestDirectionConfig(betterproto.Message): - """Configuration for filter behavior on the request direction.""" - - common_config: "CompressorCommonDirectionConfig" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class CompressorResponseDirectionConfig(betterproto.Message): - """Configuration for filter behavior on the response direction.""" - - common_config: "CompressorCommonDirectionConfig" = betterproto.message_field(1) - # If true, disables compression when the response contains an etag header. - # When it is false, the filter will preserve weak etags and remove the ones - # that require strong validation. - disable_on_etag_header: bool = betterproto.bool_field(2) - # If true, removes accept-encoding from the request headers before - # dispatching it to the upstream so that responses do not get compressed - # before reaching the filter. .. attention:: To avoid interfering with - # other compression filters in the same chain use this option in the - # filter closest to the upstream. 
- remove_accept_encoding_header: bool = betterproto.bool_field(3) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/cors/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/cors/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/cors/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/cors/v3/__init__.py deleted file mode 100644 index 5d4b201..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/cors/v3/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/cors/v3/cors.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Cors(betterproto.Message): - """Cors filter config.""" - - pass diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/csrf/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/csrf/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/csrf/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/csrf/v3/__init__.py deleted file mode 100644 index 562b1bd..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/csrf/v3/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/csrf/v3/csrf.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class CsrfPolicy(betterproto.Message): - """CSRF filter config.""" - - # Specifies the % of requests for which the CSRF filter is enabled. If - # :ref:`runtime_key - # ` - # is specified, Envoy will lookup the runtime key to get the percentage of - # requests to filter. .. note:: This field defaults to 100/:ref:`HUNDRED - # `. - filter_enabled: "_____config_core_v3__.RuntimeFractionalPercent" = ( - betterproto.message_field(1) - ) - # Specifies that CSRF policies will be evaluated and tracked, but not - # enforced. This is intended to be used when ``filter_enabled`` is off and - # will be ignored otherwise. If :ref:`runtime_key - # ` - # is specified, Envoy will lookup the runtime key to get the percentage of - # requests for which it will evaluate and track the request's *Origin* and - # *Destination* to determine if it's valid, but will not enforce any - # policies. - shadow_enabled: "_____config_core_v3__.RuntimeFractionalPercent" = ( - betterproto.message_field(2) - ) - # Specifies additional source origins that will be allowed in addition to the - # destination origin. More information on how this can be configured via - # runtime can be found :ref:`here `. 
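# --- Editor's note (illustrative sketch, not part of the deleted file): the
# per-direction Compressor configuration described above, assuming the regenerated
# 1.24.0 bindings keep this module path. A working filter would also set
# ``compressor_library`` (a TypedExtensionConfig); it is omitted here for brevity.
from envoy_data_plane.envoy.extensions.filters.http.compressor.v3 import (
    Compressor,
    CompressorCommonDirectionConfig,
    CompressorResponseDirectionConfig,
)

compressor = Compressor(
    response_direction_config=CompressorResponseDirectionConfig(
        common_config=CompressorCommonDirectionConfig(
            min_content_length=100,             # bytes, wrapped uint32
            content_type=["application/json"],  # only compress JSON responses
        ),
        disable_on_etag_header=True,
        remove_accept_encoding_header=True,
    ),
)
print(compressor.to_dict())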
- additional_origins: List[ - "_____type_matcher_v3__.StringMatcher" - ] = betterproto.message_field(3) - - -from ......config.core import v3 as _____config_core_v3__ -from ......type.matcher import v3 as _____type_matcher_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/decompressor/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/decompressor/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/decompressor/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/decompressor/v3/__init__.py deleted file mode 100644 index 033b6ab..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/decompressor/v3/__init__.py +++ /dev/null @@ -1,68 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/decompressor/v3/decompressor.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Decompressor(betterproto.Message): - # A decompressor library to use for both request and response decompression. - # Currently only :ref:`envoy.compression.gzip.compressor` is included in Envoy. - # [#extension-category: envoy.compression.decompressor] - decompressor_library: "_____config_core_v3__.TypedExtensionConfig" = ( - betterproto.message_field(1) - ) - # Configuration for request decompression. Decompression is enabled by - # default if left empty. - request_direction_config: "DecompressorRequestDirectionConfig" = ( - betterproto.message_field(2) - ) - # Configuration for response decompression. Decompression is enabled by - # default if left empty. - response_direction_config: "DecompressorResponseDirectionConfig" = ( - betterproto.message_field(3) - ) - - -@dataclass(eq=False, repr=False) -class DecompressorCommonDirectionConfig(betterproto.Message): - """ - Common configuration for filter behavior on both the request and response - direction. - """ - - # Runtime flag that controls whether the filter is enabled for decompression - # or not. If set to false, the filter will operate as a pass-through filter. - # If the message is unspecified, the filter will be enabled. - enabled: "_____config_core_v3__.RuntimeFeatureFlag" = betterproto.message_field(1) - # If set to true, will decompress response even if a *no-transform* cache - # control header is set. - ignore_no_transform_header: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class DecompressorRequestDirectionConfig(betterproto.Message): - """Configuration for filter behavior on the request direction.""" - - common_config: "DecompressorCommonDirectionConfig" = betterproto.message_field(1) - # If set to true, and response decompression is enabled, the filter modifies - # the Accept-Encoding request header by appending the decompressor_library's - # encoding. Defaults to true. 
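# --- Editor's note (illustrative sketch, not part of the deleted file): enabling
# request-side decompression with the messages described above, assuming the
# regenerated 1.24.0 bindings keep this module path. ``decompressor_library`` (a
# TypedExtensionConfig naming the concrete decompressor) is omitted for brevity.
from envoy_data_plane.envoy.extensions.filters.http.decompressor.v3 import (
    Decompressor,
    DecompressorCommonDirectionConfig,
    DecompressorRequestDirectionConfig,
)

decompressor = Decompressor(
    request_direction_config=DecompressorRequestDirectionConfig(
        common_config=DecompressorCommonDirectionConfig(
            ignore_no_transform_header=True,  # decompress despite cache-control: no-transform
        ),
    ),
)
print(decompressor.to_dict())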
- advertise_accept_encoding: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) - - -@dataclass(eq=False, repr=False) -class DecompressorResponseDirectionConfig(betterproto.Message): - """Configuration for filter behavior on the response direction.""" - - common_config: "DecompressorCommonDirectionConfig" = betterproto.message_field(1) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/dynamic_forward_proxy/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/dynamic_forward_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/dynamic_forward_proxy/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/dynamic_forward_proxy/v3/__init__.py deleted file mode 100644 index 707f7cb..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/dynamic_forward_proxy/v3/__init__.py +++ /dev/null @@ -1,65 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FilterConfig(betterproto.Message): - """ - Configuration for the dynamic forward proxy HTTP filter. See the - :ref:`architecture overview ` for - more information. [#extension: envoy.filters.http.dynamic_forward_proxy] - """ - - # The DNS cache configuration that the filter will attach to. Note this - # configuration must match that of associated :ref:`dynamic forward proxy - # cluster configuration `. - dns_cache_config: "____common_dynamic_forward_proxy_v3__.DnsCacheConfig" = ( - betterproto.message_field(1) - ) - # When this flag is set, the filter will add the resolved upstream address in - # the filter state. The state should be saved with key - # `envoy.stream.upstream_address` (See - # :repo:`upstream_address.h`). - save_upstream_address: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class PerRouteConfig(betterproto.Message): - """Per route Configuration for the dynamic forward proxy HTTP filter.""" - - # Indicates that before DNS lookup, the host header will be swapped with this - # value. If not set or empty, the original host header value will be used and - # no rewrite will happen. Note: this rewrite affects both DNS lookup and host - # header forwarding. However, this option shouldn't be used with :ref:`HCM - # host rewrite - # ` - # given that the value set here would be used for DNS lookups whereas the - # value set in the HCM would be used for host header forwarding which is not - # the desired outcome. - host_rewrite_literal: str = betterproto.string_field( - 1, group="host_rewrite_specifier" - ) - # Indicates that before DNS lookup, the host header will be swapped with the - # value of this header. If not set or empty, the original host header value - # will be used and no rewrite will happen. Note: this rewrite affects both - # DNS lookup and host header forwarding. However, this option shouldn't be - # used with :ref:`HCM host rewrite header - # ` given - # that the value set here would be used for DNS lookups whereas the value set - # in the HCM would be used for host header forwarding which is not the - # desired outcome. .. 
note:: If the header appears multiple times only the - # first value is used. - host_rewrite_header: str = betterproto.string_field( - 2, group="host_rewrite_specifier" - ) - - -from .....common.dynamic_forward_proxy import ( - v3 as ____common_dynamic_forward_proxy_v3__, -) diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/dynamo/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/dynamo/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/dynamo/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/dynamo/v3/__init__.py deleted file mode 100644 index 721f18e..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/dynamo/v3/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/dynamo/v3/dynamo.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Dynamo(betterproto.Message): - """Dynamo filter config.""" - - pass diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/ext_authz/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/ext_authz/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/ext_authz/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/ext_authz/v3/__init__.py deleted file mode 100644 index 8731e97..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/ext_authz/v3/__init__.py +++ /dev/null @@ -1,283 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Dict, List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ExtAuthz(betterproto.Message): - """[#next-free-field: 16]""" - - # gRPC service configuration (default timeout: 200ms). - grpc_service: "_____config_core_v3__.GrpcService" = betterproto.message_field( - 1, group="services" - ) - # HTTP service configuration (default timeout: 200ms). - http_service: "HttpService" = betterproto.message_field(3, group="services") - # API version for ext_authz transport protocol. This describes the ext_authz - # gRPC endpoint and version of messages used on the wire. - transport_api_version: "_____config_core_v3__.ApiVersion" = betterproto.enum_field( - 12 - ) - # Changes filter's behaviour on errors: 1. When set to true, the filter will - # *accept* client request even if the communication with the authorization - # service has failed, or if the authorization service has returned a HTTP 5xx - # error. 2. When set to false, ext-authz will *reject* client requests and - # return a *Forbidden* response if the communication with the authorization - # service has failed, or if the authorization service has returned a HTTP - # 5xx error. Note that errors can be *always* tracked in the :ref:`stats - # `. - failure_mode_allow: bool = betterproto.bool_field(2) - # Enables filter to buffer the client request body and send it within the - # authorization request. A ``x-envoy-auth-partial-body: false|true`` metadata - # header will be added to the authorization request message indicating if the - # body data is partial. 
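# --- Editor's note (illustrative sketch, not part of the deleted file): the
# dynamic_forward_proxy PerRouteConfig host-rewrite oneof described above, assuming
# the regenerated 1.24.0 bindings keep this module path; the hostname is
# hypothetical. Only one host_rewrite_specifier member should be set; betterproto
# reports the active member via which_one_of().
import betterproto

from envoy_data_plane.envoy.extensions.filters.http.dynamic_forward_proxy.v3 import (
    PerRouteConfig,
)

per_route = PerRouteConfig(host_rewrite_literal="backend.internal")
field, value = betterproto.which_one_of(per_route, "host_rewrite_specifier")
assert (field, value) == ("host_rewrite_literal", "backend.internal")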
- with_request_body: "BufferSettings" = betterproto.message_field(5) - # Clears route cache in order to allow the external authorization service to - # correctly affect routing decisions. Filter clears all cached routes when: - # 1. The field is set to *true*. 2. The status returned from the - # authorization service is a HTTP 200 or gRPC 0. 3. At least one - # *authorization response header* is added to the client request, or is used - # for altering another client request header. - clear_route_cache: bool = betterproto.bool_field(6) - # Sets the HTTP status that is returned to the client when there is a network - # error between the filter and the authorization server. The default status - # is HTTP 403 Forbidden. - status_on_error: "_____type_v3__.HttpStatus" = betterproto.message_field(7) - # Specifies a list of metadata namespaces whose values, if present, will be - # passed to the ext_authz service as an opaque *protobuf::Struct*. For - # example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata ` is set, then the following will pass the jwt payload to the - # authorization server. .. code-block:: yaml metadata_context_namespaces: - # - envoy.filters.http.jwt_authn - metadata_context_namespaces: List[str] = betterproto.string_field(8) - # Specifies if the filter is enabled. If :ref:`runtime_key - # ` - # is specified, Envoy will lookup the runtime key to get the percentage of - # requests to filter. If this field is not specified, the filter will be - # enabled for all requests. - filter_enabled: "_____config_core_v3__.RuntimeFractionalPercent" = ( - betterproto.message_field(9) - ) - # Specifies if the filter is enabled with metadata matcher. If this field is - # not specified, the filter will be enabled for all requests. - filter_enabled_metadata: "_____type_matcher_v3__.MetadataMatcher" = ( - betterproto.message_field(14) - ) - # Specifies whether to deny the requests, when the filter is disabled. If - # :ref:`runtime_key - # ` is - # specified, Envoy will lookup the runtime key to determine whether to deny - # request for filter protected path at filter disabling. If filter is - # disabled in typed_per_filter_config for the path, requests will not be - # denied. If this field is not specified, all requests will be allowed when - # disabled. - deny_at_disable: "_____config_core_v3__.RuntimeFeatureFlag" = ( - betterproto.message_field(11) - ) - # Specifies if the peer certificate is sent to the external service. When - # this field is true, Envoy will include the peer X.509 certificate, if - # available, in the :ref:`certificate`. - include_peer_certificate: bool = betterproto.bool_field(10) - # Optional additional prefix to use when emitting statistics. This allows to - # distinguish emitted statistics between configured *ext_authz* filters in an - # HTTP filter chain. For example: .. code-block:: yaml http_filters: - - # name: envoy.filters.http.ext_authz typed_config: "@type": - # type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz - # stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc. - # - name: envoy.filters.http.ext_authz typed_config: "@type": - # type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz - # stat_prefix: blocker # This emits ext_authz.blocker.ok, - # ext_authz.blocker.denied, etc. - stat_prefix: str = betterproto.string_field(13) - # Optional labels that will be passed to :ref:`labels` in :ref:`destination`. The labels will be - # read from :ref:`metadata` with the - # specified key. 
- bootstrap_metadata_labels_key: str = betterproto.string_field(15) - - -@dataclass(eq=False, repr=False) -class BufferSettings(betterproto.Message): - """Configuration for buffering the request data.""" - - # Sets the maximum size of a message body that the filter will hold in - # memory. Envoy will return *HTTP 413* and will *not* initiate the - # authorization process when buffer reaches the number set in this field. - # Note that this setting will have precedence over :ref:`failure_mode_allow < - # envoy_v3_api_field_extensions.filters.http.ext_authz.v3.ExtAuthz.failure_mo - # de_allow>`. - max_request_bytes: int = betterproto.uint32_field(1) - # When this field is true, Envoy will buffer the message until - # *max_request_bytes* is reached. The authorization request will be - # dispatched and no 413 HTTP error will be returned by the filter. - allow_partial_message: bool = betterproto.bool_field(2) - # If true, the body sent to the external authorization service is set with - # raw bytes, it sets the :ref:`raw_body` field of HTTP request attribute - # context. Otherwise, :ref:` - # body` - # will be filled with UTF-8 string request body. - pack_as_bytes: bool = betterproto.bool_field(3) - - -@dataclass(eq=False, repr=False) -class HttpService(betterproto.Message): - """ - HttpService is used for raw HTTP communication between the filter and the - authorization service. When configured, the filter will parse the client - request and use these attributes to call the authorization server. - Depending on the response, the filter may reject or accept the client - request. Note that in any of these events, metadata can be added, removed - or overridden by the filter: *On authorization request*, a list of allowed - request headers may be supplied. See :ref:`allowed_headers ` for details. Additional headers metadata may be added to the - authorization request. See :ref:`headers_to_add ` for - details. On authorization response status HTTP 200 OK, the filter will - allow traffic to the upstream and additional headers metadata may be added - to the original client request. See :ref:`allowed_upstream_headers ` for details. Additionally, the filter may add - additional headers to the client's response. See - :ref:`allowed_client_headers_on_success ` for details. On other authorization response statuses, the filter will - not allow traffic. Additional headers metadata as well as body may be added - to the client's response. See :ref:`allowed_client_headers ` for details. [#next-free-field: 9] - """ - - # Sets the HTTP server URI which the authorization requests must be sent to. - server_uri: "_____config_core_v3__.HttpUri" = betterproto.message_field(1) - # Sets a prefix to the value of authorization request header *Path*. - path_prefix: str = betterproto.string_field(2) - # Settings used for controlling authorization request metadata. - authorization_request: "AuthorizationRequest" = betterproto.message_field(7) - # Settings used for controlling authorization response metadata. - authorization_response: "AuthorizationResponse" = betterproto.message_field(8) - - -@dataclass(eq=False, repr=False) -class AuthorizationRequest(betterproto.Message): - # Authorization request includes the client request headers that have a - # correspondent match in the :ref:`list - # `. .. note:: In - # addition to the the user's supplied matchers, ``Host``, ``Method``, - # ``Path``, ``Content-Length``, and ``Authorization`` are **automatically - # included** to the list. .. 
note:: By default, ``Content-Length`` header - # is set to ``0`` and the request to the authorization service has no - # message body. However, the authorization request *may* include the buffered - # client request body (controlled by :ref:`with_request_body ` - # setting) hence the value of its ``Content-Length`` reflects the size of its - # payload size. - allowed_headers: "_____type_matcher_v3__.ListStringMatcher" = ( - betterproto.message_field(1) - ) - # Sets a list of headers that will be included to the request to - # authorization service. Note that client request of the same key will be - # overridden. - headers_to_add: List[ - "_____config_core_v3__.HeaderValue" - ] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class AuthorizationResponse(betterproto.Message): - """[#next-free-field: 6]""" - - # When this :ref:`list ` - # is set, authorization response headers that have a correspondent match will - # be added to the original client request. Note that coexistent headers will - # be overridden. - allowed_upstream_headers: "_____type_matcher_v3__.ListStringMatcher" = ( - betterproto.message_field(1) - ) - # When this :ref:`list ` - # is set, authorization response headers that have a correspondent match will - # be added to the client's response. Note that coexistent headers will be - # appended. - allowed_upstream_headers_to_append: "_____type_matcher_v3__.ListStringMatcher" = ( - betterproto.message_field(3) - ) - # When this :ref:`list `. - # is set, authorization response headers that have a correspondent match will - # be added to the client's response. Note that when this list is *not* set, - # all the authorization response headers, except *Authority (Host)* will be - # in the response to the client. When a header is included in this list, - # *Path*, *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are - # automatically added. - allowed_client_headers: "_____type_matcher_v3__.ListStringMatcher" = ( - betterproto.message_field(2) - ) - # When this :ref:`list `. - # is set, authorization response headers that have a correspondent match will - # be added to the client's response when the authorization response itself is - # successful, i.e. not failed or denied. When this list is *not* set, no - # additional headers will be added to the client's response on success. - allowed_client_headers_on_success: "_____type_matcher_v3__.ListStringMatcher" = ( - betterproto.message_field(4) - ) - # When this :ref:`list ` - # is set, authorization response headers that have a correspondent match will - # be emitted as dynamic metadata to be consumed by the next filter. This - # metadata lives in a namespace specified by the canonical name of extension - # filter that requires it: - :ref:`envoy.filters.http.ext_authz - # ` for HTTP filter. - - # :ref:`envoy.filters.network.ext_authz - # ` for network filter. - dynamic_metadata_from_headers: "_____type_matcher_v3__.ListStringMatcher" = ( - betterproto.message_field(5) - ) - - -@dataclass(eq=False, repr=False) -class ExtAuthzPerRoute(betterproto.Message): - """Extra settings on a per virtualhost/route/weighted-cluster level.""" - - # Disable the ext auth filter for this particular vhost or route. If disabled - # is specified in multiple per-filter-configs, the most specific one will be - # used. - disabled: bool = betterproto.bool_field(1, group="override") - # Check request settings for this route. 
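# --- Editor's note (illustrative sketch, not part of the deleted file): request-body
# buffering for the ext_authz filter described above, assuming the regenerated 1.24.0
# bindings keep this module path; values are hypothetical and the ``services`` oneof
# (gRPC or HTTP authorization service) is omitted for brevity.
from envoy_data_plane.envoy.extensions.filters.http.ext_authz.v3 import (
    BufferSettings,
    ExtAuthz,
)

ext_authz = ExtAuthz(
    failure_mode_allow=False,        # reject requests if the authorization service fails
    clear_route_cache=True,
    stat_prefix="waf",               # emits ext_authz.waf.ok, ext_authz.waf.denied, ...
    with_request_body=BufferSettings(
        max_request_bytes=8192,      # buffer at most 8 KiB of request body
        allow_partial_message=True,  # dispatch the check instead of returning 413
        pack_as_bytes=True,          # send raw bytes rather than a UTF-8 string body
    ),
)
print(ext_authz.to_dict())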
- check_settings: "CheckSettings" = betterproto.message_field(2, group="override") - - -@dataclass(eq=False, repr=False) -class CheckSettings(betterproto.Message): - """Extra settings for the check request.""" - - # Context extensions to set on the CheckRequest's :ref:`AttributeContext.cont - # ext_extensions` You can use this to provide extra context for the external - # authorization server on specific virtual hosts/routes. For example, adding - # a context extension on the virtual host level can give the ext-authz server - # information on what virtual host is used without needing to parse the host - # header. If CheckSettings is specified in multiple per-filter-configs, they - # will be merged in order, and the result will be used. Merge semantics for - # this field are such that keys from more specific configs override. .. - # note:: These settings are only applied to a filter configured with a :r - # ef:`grpc_service`. - context_extensions: Dict[str, str] = betterproto.map_field( - 1, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - # When set to true, disable the configured :ref:`with_request_body ` - # for a route. - disable_request_body_buffering: bool = betterproto.bool_field(2) - - -from ......config.core import v3 as _____config_core_v3__ -from ......type import v3 as _____type_v3__ -from ......type.matcher import v3 as _____type_matcher_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/ext_proc/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/ext_proc/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/ext_proc/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/ext_proc/v3/__init__.py deleted file mode 100644 index baf32da..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/ext_proc/v3/__init__.py +++ /dev/null @@ -1,129 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto, envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class ProcessingModeHeaderSendMode(betterproto.Enum): - DEFAULT = 0 - SEND = 1 - SKIP = 2 - - -class ProcessingModeBodySendMode(betterproto.Enum): - NONE = 0 - STREAMED = 1 - BUFFERED = 2 - BUFFERED_PARTIAL = 3 - - -@dataclass(eq=False, repr=False) -class ProcessingMode(betterproto.Message): - """[#next-free-field: 7]""" - - # How to handle the request header. Default is "SEND". - request_header_mode: "ProcessingModeHeaderSendMode" = betterproto.enum_field(1) - # How to handle the response header. Default is "SEND". - response_header_mode: "ProcessingModeHeaderSendMode" = betterproto.enum_field(2) - # How to handle the request body. Default is "NONE". - request_body_mode: "ProcessingModeBodySendMode" = betterproto.enum_field(3) - # How do handle the response body. Default is "NONE". - response_body_mode: "ProcessingModeBodySendMode" = betterproto.enum_field(4) - # How to handle the request trailers. Default is "SKIP". - request_trailer_mode: "ProcessingModeHeaderSendMode" = betterproto.enum_field(5) - # How to handle the response trailers. Default is "SKIP". 
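# --- Editor's note (illustrative sketch, not part of the deleted file): per-route
# ext_authz overrides using the ExtAuthzPerRoute/CheckSettings messages described
# above, assuming the regenerated 1.24.0 bindings keep this module path; the context
# extension key/value is hypothetical.
from envoy_data_plane.envoy.extensions.filters.http.ext_authz.v3 import (
    CheckSettings,
    ExtAuthzPerRoute,
)

authz_per_route = ExtAuthzPerRoute(
    check_settings=CheckSettings(
        context_extensions={"virtual_host": "payments"},  # extra context for the authz server
        disable_request_body_buffering=True,              # skip with_request_body on this route
    ),
)
print(authz_per_route.to_dict())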
- response_trailer_mode: "ProcessingModeHeaderSendMode" = betterproto.enum_field(6) - - -@dataclass(eq=False, repr=False) -class ExternalProcessor(betterproto.Message): - """[#next-free-field: 9]""" - - # Configuration for the gRPC service that the filter will communicate with. - # The filter supports both the "Envoy" and "Google" gRPC clients. - grpc_service: "_____config_core_v3__.GrpcService" = betterproto.message_field(1) - # By default, if the gRPC stream cannot be established, or if it is closed - # prematurely with an error, the filter will fail. Specifically, if the - # response headers have not yet been delivered, then it will return a 500 - # error downstream. If they have been delivered, then instead the HTTP stream - # to the downstream client will be reset. With this parameter set to true, - # however, then if the gRPC stream is prematurely closed or could not be - # opened, processing continues without error. - failure_mode_allow: bool = betterproto.bool_field(2) - # Specifies default options for how HTTP headers, trailers, and bodies are - # sent. See ProcessingMode for details. - processing_mode: "ProcessingMode" = betterproto.message_field(3) - # [#not-implemented-hide:] If true, send each part of the HTTP request or - # response specified by ProcessingMode asynchronously -- in other words, send - # the message on the gRPC stream and then continue filter processing. If - # false, which is the default, suspend filter execution after each message is - # sent to the remote service and wait up to "message_timeout" for a reply. - async_mode: bool = betterproto.bool_field(4) - # [#not-implemented-hide:] Envoy provides a number of :ref:`attributes - # ` for expressive policies. Each attribute name - # provided in this field will be matched against that list and populated in - # the request_headers message. See the :ref:`attribute documentation - # ` for the list of supported attributes - # and their types. - request_attributes: List[str] = betterproto.string_field(5) - # [#not-implemented-hide:] Envoy provides a number of :ref:`attributes - # ` for expressive policies. Each attribute name - # provided in this field will be matched against that list and populated in - # the response_headers message. See the :ref:`attribute documentation - # ` for the list of supported attributes and their - # types. - response_attributes: List[str] = betterproto.string_field(6) - # Specifies the timeout for each individual message sent on the stream and - # when the filter is running in synchronous mode. Whenever the proxy sends a - # message on the stream that requires a response, it will reset this timer, - # and will stop processing and return an error (subject to the processing - # mode) if the timer expires before a matching response. is received. There - # is no timeout when the filter is running in asynchronous mode. Default is - # 200 milliseconds. - message_timeout: timedelta = betterproto.message_field(7) - # [#not-implemented-hide:] Optional additional prefix to use when emitting - # statistics. This allows to distinguish emitted statistics between - # configured *ext_proc* filters in an HTTP filter chain. - stat_prefix: str = betterproto.string_field(8) - - -@dataclass(eq=False, repr=False) -class ExtProcPerRoute(betterproto.Message): - """ - Extra settings that may be added to per-route configuration for a virtual - host or cluster. - """ - - # Disable the filter for this particular vhost or route. 
If disabled is - # specified in multiple per-filter-configs, the most specific one will be - # used. - disabled: bool = betterproto.bool_field(1, group="override") - # Override aspects of the configuration for this route. A set of overrides in - # a more specific configuration will override a "disabled" flag set in a - # less-specific one. - overrides: "ExtProcOverrides" = betterproto.message_field(2, group="override") - - -@dataclass(eq=False, repr=False) -class ExtProcOverrides(betterproto.Message): - """Overrides that may be set on a per-route basis""" - - # Set a different processing mode for this route than the default. - processing_mode: "ProcessingMode" = betterproto.message_field(1) - # [#not-implemented-hide:] Set a different asynchronous processing option - # than the default. - async_mode: bool = betterproto.bool_field(2) - # [#not-implemented-hide:] Set different optional properties than the - # default. - request_properties: List[str] = betterproto.string_field(3) - # [#not-implemented-hide:] Set different optional properties than the - # default. - response_properties: List[str] = betterproto.string_field(4) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/fault/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/fault/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/fault/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/fault/v3/__init__.py deleted file mode 100644 index e1e2055..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/fault/v3/__init__.py +++ /dev/null @@ -1,130 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/fault/v3/fault.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FaultAbort(betterproto.Message): - """[#next-free-field: 6]""" - - # HTTP status code to use to abort the HTTP request. - http_status: int = betterproto.uint32_field(2, group="error_type") - # gRPC status code to use to abort the gRPC request. - grpc_status: int = betterproto.uint32_field(5, group="error_type") - # Fault aborts are controlled via an HTTP header (if applicable). - header_abort: "FaultAbortHeaderAbort" = betterproto.message_field( - 4, group="error_type" - ) - # The percentage of requests/operations/connections that will be aborted with - # the error code provided. - percentage: "_____type_v3__.FractionalPercent" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class FaultAbortHeaderAbort(betterproto.Message): - """ - Fault aborts are controlled via an HTTP header (if applicable). See the - :ref:`HTTP fault filter ` - documentation for more information. - """ - - pass - - -@dataclass(eq=False, repr=False) -class HttpFault(betterproto.Message): - """[#next-free-field: 16]""" - - # If specified, the filter will inject delays based on the values in the - # object. - delay: "___common_fault_v3__.FaultDelay" = betterproto.message_field(1) - # If specified, the filter will abort requests based on the values in the - # object. At least *abort* or *delay* must be specified. - abort: "FaultAbort" = betterproto.message_field(2) - # Specifies the name of the (destination) upstream cluster that the filter - # should match on. 
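# --- Editor's note (illustrative sketch, not part of the deleted file): an
# ExternalProcessor plus a per-route override built from the ext_proc messages
# described above, assuming the regenerated 1.24.0 bindings keep this module path;
# the ``grpc_service`` target is omitted for brevity.
from datetime import timedelta

from envoy_data_plane.envoy.extensions.filters.http.ext_proc.v3 import (
    ExternalProcessor,
    ExtProcOverrides,
    ExtProcPerRoute,
    ProcessingMode,
    ProcessingModeBodySendMode,
)

ext_proc = ExternalProcessor(
    failure_mode_allow=True,                      # keep serving if the gRPC stream fails
    message_timeout=timedelta(milliseconds=200),  # per-message timeout (the documented default)
)
ext_proc_per_route = ExtProcPerRoute(
    overrides=ExtProcOverrides(
        processing_mode=ProcessingMode(
            response_body_mode=ProcessingModeBodySendMode.STREAMED,  # stream response bodies
        ),
    ),
)
print(ext_proc.to_dict(), ext_proc_per_route.to_dict())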
Fault injection will be restricted to requests bound to - # the specific upstream cluster. - upstream_cluster: str = betterproto.string_field(3) - # Specifies a set of headers that the filter should match on. The fault - # injection filter can be applied selectively to requests that match a set of - # headers specified in the fault filter config. The chances of actual fault - # injection further depend on the value of the :ref:`percentage ` field. The - # filter will check the request's headers against all the specified headers - # in the filter config. A match will happen if all the headers in the config - # are present in the request with the same values (or based on presence if - # the *value* field is not in the config). - headers: List["_____config_route_v3__.HeaderMatcher"] = betterproto.message_field(4) - # Faults are injected for the specified list of downstream hosts. If this - # setting is not set, faults are injected for all downstream nodes. - # Downstream node name is taken from :ref:`the HTTP x-envoy-downstream- - # service-node ` header - # and compared against downstream_nodes list. - downstream_nodes: List[str] = betterproto.string_field(5) - # The maximum number of faults that can be active at a single time via the - # configured fault filter. Note that because this setting can be overridden - # at the route level, it's possible for the number of active faults to be - # greater than this value (if injected via a different route). If not - # specified, defaults to unlimited. This setting can be overridden via - # `runtime ` and any faults that - # are not injected due to overflow will be indicated via the `faults_overflow - # ` stat. .. attention:: Like - # other :ref:`circuit breakers ` in Envoy, this - # is a fuzzy limit. It's possible for the number of active faults to rise - # slightly above the configured amount due to the implementation details. - max_active_faults: Optional[int] = betterproto.message_field( - 6, wraps=betterproto.TYPE_UINT32 - ) - # The response rate limit to be applied to the response body of the stream. - # When configured, the percentage can be overridden by the - # :ref:`fault.http.rate_limit.response_percent - # ` runtime key. .. attention:: - # This is a per-stream limit versus a connection level limit. This means that - # concurrent streams will each get an independent limit. - response_rate_limit: "___common_fault_v3__.FaultRateLimit" = ( - betterproto.message_field(7) - ) - # The runtime key to override the :ref:`default - # ` runtime. The default is: - # fault.http.delay.fixed_delay_percent - delay_percent_runtime: str = betterproto.string_field(8) - # The runtime key to override the :ref:`default - # ` runtime. The default is: - # fault.http.abort.abort_percent - abort_percent_runtime: str = betterproto.string_field(9) - # The runtime key to override the :ref:`default - # ` runtime. The default is: - # fault.http.delay.fixed_duration_ms - delay_duration_runtime: str = betterproto.string_field(10) - # The runtime key to override the :ref:`default - # ` runtime. The default is: - # fault.http.abort.http_status - abort_http_status_runtime: str = betterproto.string_field(11) - # The runtime key to override the :ref:`default - # ` runtime. The default is: - # fault.http.max_active_faults - max_active_faults_runtime: str = betterproto.string_field(12) - # The runtime key to override the :ref:`default - # ` runtime. 
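# --- Editor's note (illustrative sketch, not part of the deleted file): an abort
# fault built from the HttpFault/FaultAbort messages described above, assuming the
# regenerated 1.24.0 bindings keep this module path; cluster and node names are
# hypothetical, and the abort ``percentage`` (a FractionalPercent) is omitted for
# brevity.
from envoy_data_plane.envoy.extensions.filters.http.fault.v3 import FaultAbort, HttpFault

http_fault = HttpFault(
    abort=FaultAbort(http_status=503),  # abort matching requests with HTTP 503
    upstream_cluster="backend",         # only fault requests bound for this cluster
    downstream_nodes=["canary-node"],   # only fault these downstream nodes
    max_active_faults=10,               # wrapped uint32; caps concurrent active faults
)
print(http_fault.to_dict())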
The default is: - # fault.http.rate_limit.response_percent - response_rate_limit_percent_runtime: str = betterproto.string_field(13) - # The runtime key to override the :ref:`default - # ` runtime. The default is: - # fault.http.abort.grpc_status - abort_grpc_status_runtime: str = betterproto.string_field(14) - # To control whether stats storage is allocated dynamically for each - # downstream server. If set to true, "x-envoy-downstream-service-cluster" - # field of header will be ignored by this filter. If set to false, dynamic - # stats storage will be allocated for the downstream cluster name. Default - # value is false. - disable_downstream_cluster_stats: bool = betterproto.bool_field(15) - - -from ......config.route import v3 as _____config_route_v3__ -from ......type import v3 as _____type_v3__ -from ....common.fault import v3 as ___common_fault_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/grpc_http1_bridge/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/grpc_http1_bridge/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/grpc_http1_bridge/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/grpc_http1_bridge/v3/__init__.py deleted file mode 100644 index d98d773..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/grpc_http1_bridge/v3/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Config(betterproto.Message): - """gRPC HTTP/1.1 Bridge filter config.""" - - # If true then requests with content type set to `application/x-protobuf` - # will be automatically converted to gRPC. This works by prepending the - # payload data with the gRPC header frame, as defined by the wiring format, - # and Content-Type will be updated accordingly before sending the request. - # For the requests that went through this upgrade the filter will also strip - # the frame before forwarding the response to the client. - upgrade_protobuf_to_grpc: bool = betterproto.bool_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/grpc_http1_reverse_bridge/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/grpc_http1_reverse_bridge/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/__init__.py deleted file mode 100644 index 871f7b1..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FilterConfig(betterproto.Message): - """gRPC reverse bridge filter configuration""" - - # The content-type to pass to the upstream when the gRPC bridge filter is - # applied. 
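# --- Editor's note (illustrative sketch, not part of the deleted file): the gRPC
# HTTP/1.1 bridge Config described above, serialized and parsed back, assuming the
# regenerated 1.24.0 bindings keep this module path.
from envoy_data_plane.envoy.extensions.filters.http.grpc_http1_bridge.v3 import Config

bridge = Config(upgrade_protobuf_to_grpc=True)  # auto-upgrade application/x-protobuf requests
assert Config().parse(bytes(bridge)).upgrade_protobuf_to_grpc is True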
The filter will also validate that the upstream responds with the - # same content type. - content_type: str = betterproto.string_field(1) - # If true, Envoy will assume that the upstream doesn't understand gRPC frames - # and strip the gRPC frame from the request, and add it back in to the - # response. This will hide the gRPC semantics from the upstream, allowing it - # to receive and respond with a simple binary encoded protobuf. In order to - # calculate the `Content-Length` header value, Envoy will buffer the upstream - # response unless :ref:`response_size_header - # ` is set, in which case Envoy will use the value of an upstream header to - # calculate the content length. - withhold_grpc_frames: bool = betterproto.bool_field(2) - # When :ref:`withhold_grpc_frames ` is true, - # this option controls how Envoy calculates the `Content-Length`. When - # *response_size_header* is empty, Envoy will buffer the upstream response to - # calculate its size. When *response_size_header* is set to a non-empty - # string, Envoy will stream the response to the downstream and it will use - # the value of the response header with this name to set the `Content-Length` - # header and gRPC frame size. If the header with this name is repeated, only - # the first value will be used. Envoy will treat the upstream response as an - # error if this option is specified and the header is missing or if the value - # does not match the actual response body size. - response_size_header: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class FilterConfigPerRoute(betterproto.Message): - """ - gRPC reverse bridge filter configuration per virtualhost/route/weighted- - cluster level. - """ - - # If true, disables gRPC reverse bridge filter for this particular vhost or - # route. If disabled is specified in multiple per-filter-configs, the most - # specific one will be used. - disabled: bool = betterproto.bool_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/grpc_json_transcoder/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/grpc_json_transcoder/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/grpc_json_transcoder/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/grpc_json_transcoder/v3/__init__.py deleted file mode 100644 index 68f7902..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/grpc_json_transcoder/v3/__init__.py +++ /dev/null @@ -1,178 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class GrpcJsonTranscoderUrlUnescapeSpec(betterproto.Enum): - ALL_CHARACTERS_EXCEPT_RESERVED = 0 - ALL_CHARACTERS_EXCEPT_SLASH = 1 - ALL_CHARACTERS = 2 - - -@dataclass(eq=False, repr=False) -class GrpcJsonTranscoder(betterproto.Message): - """ - [#next-free-field: 14] GrpcJsonTranscoder filter configuration. The filter - itself can be used per route / per virtual host or on the general level. - The most specific one is being used for a given route. If the list of - services is empty - filter is considered to be disabled. Note that if - specifying the filter per route, first the route is matched, and then - transcoding filter is applied. 
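# --- Editor's note (illustrative sketch, not part of the deleted file): the gRPC
# reverse bridge FilterConfig described above, assuming the regenerated 1.24.0
# bindings keep this module path; the response-size header name is hypothetical.
from envoy_data_plane.envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3 import (
    FilterConfig,
    FilterConfigPerRoute,
)

reverse_bridge = FilterConfig(
    content_type="application/x-protobuf",   # content-type sent to and expected from upstream
    withhold_grpc_frames=True,               # strip/re-add the gRPC frame around the payload
    response_size_header="x-response-size",  # stream responses using this upstream header
)
opt_out = FilterConfigPerRoute(disabled=True)  # disable the bridge on a specific route
print(reverse_bridge.to_dict(), opt_out.to_dict())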
It matters when specifying the route - configuration and paths to match the request - for per-route grpc - transcoder configs, the original path should be matched, while in other - cases, the grpc-like path is expected (the one AFTER the filter is - applied). - """ - - # Supplies the filename of :ref:`the proto descriptor set - # ` for the gRPC services. - proto_descriptor: str = betterproto.string_field(1, group="descriptor_set") - # Supplies the binary content of :ref:`the proto descriptor set - # ` for the gRPC services. - proto_descriptor_bin: bytes = betterproto.bytes_field(4, group="descriptor_set") - # A list of strings that supplies the fully qualified service names (i.e. - # "package_name.service_name") that the transcoder will translate. If the - # service name doesn't exist in ``proto_descriptor``, Envoy will fail at - # startup. The ``proto_descriptor`` may contain more services than the - # service names specified here, but they won't be translated. By default, the - # filter will pass through requests that do not map to any specified - # services. If the list of services is empty, filter is considered disabled. - # However, this behavior changes if :ref:`reject_unknown_method ` is enabled. - services: List[str] = betterproto.string_field(2) - # Control options for response JSON. These options are passed directly to - # `JsonPrintOptions `_. - print_options: "GrpcJsonTranscoderPrintOptions" = betterproto.message_field(3) - # Whether to keep the incoming request route after the outgoing headers have - # been transformed to the match the upstream gRPC service. Note: This means - # that routes for gRPC services that are not transcoded cannot be used in - # combination with *match_incoming_request_route*. - match_incoming_request_route: bool = betterproto.bool_field(5) - # A list of query parameters to be ignored for transcoding method mapping. By - # default, the transcoder filter will not transcode a request if there are - # any unknown/invalid query parameters. Example : .. code-block:: proto - # service Bookstore { rpc GetShelf(GetShelfRequest) returns (Shelf) { - # option (google.api.http) = { get: "/shelves/{shelf}" }; - # } } message GetShelfRequest { int64 shelf = 1; } - # message Shelf {} The request ``/shelves/100?foo=bar`` will not be mapped to - # ``GetShelf``` because variable binding for ``foo`` is not defined. Adding - # ``foo`` to ``ignored_query_parameters`` will allow the same request to be - # mapped to ``GetShelf``. - ignored_query_parameters: List[str] = betterproto.string_field(6) - # Whether to route methods without the ``google.api.http`` option. Example : - # .. code-block:: proto package bookstore; service Bookstore { - # rpc GetShelf(GetShelfRequest) returns (Shelf) {} } message - # GetShelfRequest { int64 shelf = 1; } message Shelf {} The - # client could ``post`` a json body ``{"shelf": 1234}`` with the path of - # ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``. - auto_mapping: bool = betterproto.bool_field(7) - # Whether to ignore query parameters that cannot be mapped to a corresponding - # protobuf field. Use this if you cannot control the query parameters and do - # not know them beforehand. Otherwise use ``ignored_query_parameters``. - # Defaults to false. - ignore_unknown_query_parameters: bool = betterproto.bool_field(8) - # Whether to convert gRPC status headers to JSON. 
When trailer indicates a - # gRPC error and there was no HTTP body, take ``google.rpc.Status`` from the - # ``grpc-status-details-bin`` header and use it as JSON body. If there was no - # such header, make ``google.rpc.Status`` out of the ``grpc-status`` and - # ``grpc-message`` headers. The error details types must be present in the - # ``proto_descriptor``. For example, if an upstream server replies with - # headers: .. code-block:: none grpc-status: 5 grpc-status-details- - # bin: - # CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ - # The ``grpc-status-details-bin`` header contains a base64-encoded protobuf - # message ``google.rpc.Status``. It will be transcoded into: .. code-block:: - # none HTTP/1.1 404 Not Found content-type: application/json {"co - # de":5,"details":[{"@type":"type.googleapis.com/google.rpc.RequestInfo","req - # uestId":"r-1"}]} In order to transcode the message, the - # ``google.rpc.RequestInfo`` type from the ``google/rpc/error_details.proto`` - # should be included in the configured :ref:`proto descriptor set - # `. - convert_grpc_status: bool = betterproto.bool_field(9) - # URL unescaping policy. This spec is only applied when extracting variable - # with multiple segments in the URL path. For example, in case of - # `/foo/{x=*}/bar/{y=prefix/*}/{z=**}` `x` variable is single segment and `y` - # and `z` are multiple segments. For a path with - # `/foo/first/bar/prefix/second/third/fourth`, `x=first`, `y=prefix/second`, - # `z=third/fourth`. If this setting is not specified, the value defaults to : - # ref:`ALL_CHARACTERS_EXCEPT_RESERVED`. - url_unescape_spec: "GrpcJsonTranscoderUrlUnescapeSpec" = betterproto.enum_field(10) - # If true, unescape '+' to space when extracting variables in query - # parameters. This is to support `HTML 2.0 - # `_ - query_param_unescape_plus: bool = betterproto.bool_field(12) - # If true, try to match the custom verb even if it is unregistered. By - # default, only match when it is registered. According to the http template - # `syntax `_, the custom verb is **":" LITERAL** at the end of - # http template. For a request with */foo/bar:baz* and *:baz* is not - # registered in any url_template, here is the behavior change - if the field - # is not set, *:baz* will not be treated as custom verb, so it will match - # **/foo/{x=*}**. - if the field is set, *:baz* is treated as custom verb, - # so it will NOT match **/foo/{x=*}** since the template doesn't use any - # custom verb. - match_unregistered_custom_verb: bool = betterproto.bool_field(13) - # Configure the behavior when handling requests that cannot be transcoded. By - # default, the transcoder will silently pass through HTTP requests that are - # malformed. This includes requests with unknown query parameters, unregister - # paths, etc. Set these options to enable strict HTTP request validation, - # resulting in the transcoder rejecting such requests with a ``HTTP 4xx``. - # See each individual option for more details on the validation. gRPC - # requests will still silently pass through without transcoding. The benefit - # is a proper error message to the downstream. If the upstream is a gRPC - # server, it cannot handle the passed-through HTTP requests and will reset - # the TCP connection. The downstream will then receive a ``HTTP 503 Service - # Unavailable`` due to the upstream connection reset. This incorrect error - # message may conflict with other Envoy components, such as retry policies. 
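# --- Illustrative usage sketch (not part of the generated sources in this diff) ---
# A minimal transcoder configuration built from the dataclasses in this module
# (GrpcJsonTranscoderPrintOptions is defined a little further down in the same
# generated file). The descriptor path and service name are placeholders.
from envoy_data_plane.envoy.extensions.filters.http.grpc_json_transcoder.v3 import (
    GrpcJsonTranscoder,
    GrpcJsonTranscoderPrintOptions,
)

transcoder = GrpcJsonTranscoder(
    proto_descriptor="/etc/envoy/bookstore.pb",  # one member of the descriptor_set oneof
    services=["bookstore.Bookstore"],            # fully qualified service names
    convert_grpc_status=True,                    # map gRPC errors to JSON bodies
    print_options=GrpcJsonTranscoderPrintOptions(
        add_whitespace=True,
        preserve_proto_field_names=True,
    ),
)
# --- end of sketch ---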
- request_validation_options: "GrpcJsonTranscoderRequestValidationOptions" = ( - betterproto.message_field(11) - ) - - -@dataclass(eq=False, repr=False) -class GrpcJsonTranscoderPrintOptions(betterproto.Message): - # Whether to add spaces, line breaks and indentation to make the JSON output - # easy to read. Defaults to false. - add_whitespace: bool = betterproto.bool_field(1) - # Whether to always print primitive fields. By default primitive fields with - # default values will be omitted in JSON output. For example, an int32 field - # set to 0 will be omitted. Setting this flag to true will override the - # default behavior and print primitive fields regardless of their values. - # Defaults to false. - always_print_primitive_fields: bool = betterproto.bool_field(2) - # Whether to always print enums as ints. By default they are rendered as - # strings. Defaults to false. - always_print_enums_as_ints: bool = betterproto.bool_field(3) - # Whether to preserve proto field names. By default protobuf will generate - # JSON field names using the ``json_name`` option, or lower camel case, in - # that order. Setting this flag will preserve the original field names. - # Defaults to false. - preserve_proto_field_names: bool = betterproto.bool_field(4) - - -@dataclass(eq=False, repr=False) -class GrpcJsonTranscoderRequestValidationOptions(betterproto.Message): - # By default, a request that cannot be mapped to any specified gRPC - # :ref:`services ` will pass-through this filter. When - # set to true, the request will be rejected with a ``HTTP 404 Not Found``. - reject_unknown_method: bool = betterproto.bool_field(1) - # By default, a request with query parameters that cannot be mapped to the - # gRPC request message will pass-through this filter. When set to true, the - # request will be rejected with a ``HTTP 400 Bad Request``. The fields - # :ref:`ignore_unknown_query_parameters ` and :ref:`ignored_query_parameters ` have priority over this strict validation behavior. - reject_unknown_query_parameters: bool = betterproto.bool_field(2) diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/grpc_stats/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/grpc_stats/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/grpc_stats/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/grpc_stats/v3/__init__.py deleted file mode 100644 index 1a8590c..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/grpc_stats/v3/__init__.py +++ /dev/null @@ -1,60 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/grpc_stats/v3/config.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FilterConfig(betterproto.Message): - """gRPC statistics filter configuration""" - - # If true, the filter maintains a filter state object with the request and - # response message counts. - emit_filter_state: bool = betterproto.bool_field(1) - # If set, specifies an allowlist of service/methods that will have individual - # stats emitted for them. Any call that does not match the allowlist will be - # counted in a stat with no method specifier: `cluster..grpc.*`. 
- individual_method_stats_allowlist: "_____config_core_v3__.GrpcMethodList" = ( - betterproto.message_field(2, group="per_method_stat_specifier") - ) - # If set to true, emit stats for all service/method names. If set to false, - # emit stats for all service/message types to the same stats without - # including the service/method in the name, with prefix - # `cluster..grpc`. This can be useful if service/method granularity is - # not needed, or if each cluster only receives a single method. .. - # attention:: This option is only safe if all clients are trusted. If this - # option is enabled with untrusted clients, the clients could cause - # unbounded growth in the number of stats in Envoy, using unbounded memory - # and potentially slowing down stats pipelines. .. attention:: If neither - # `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the - # behavior will default to `stats_for_all_methods=false`. This default value - # is changed due to the previous value being deprecated. This behavior can - # be changed with runtime override `envoy.deprecated_features.grpc_stats_fi - # lter_enable_stats_for_all_methods_by_default`. - stats_for_all_methods: Optional[bool] = betterproto.message_field( - 3, wraps=betterproto.TYPE_BOOL, group="per_method_stat_specifier" - ) - # If true, the filter will gather a histogram for the request time of the - # upstream. It works with :ref:`stats_for_all_methods ` and - # :ref:`individual_method_stats_allowlist ` the - # same way request_message_count and response_message_count works. - enable_upstream_stats: bool = betterproto.bool_field(4) - - -@dataclass(eq=False, repr=False) -class FilterObject(betterproto.Message): - """gRPC statistics filter state object in protobuf form.""" - - # Count of request messages in the request stream. - request_message_count: int = betterproto.uint64_field(1) - # Count of response messages in the response stream. - response_message_count: int = betterproto.uint64_field(2) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/grpc_web/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/grpc_web/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/grpc_web/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/grpc_web/v3/__init__.py deleted file mode 100644 index c1b5269..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/grpc_web/v3/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class GrpcWeb(betterproto.Message): - """gRPC Web filter config.""" - - pass diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/gzip/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/gzip/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/gzip/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/gzip/v3/__init__.py deleted file mode 100644 index 5d2659e..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/gzip/v3/__init__.py +++ /dev/null @@ -1,74 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/extensions/filters/http/gzip/v3/gzip.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class GzipCompressionStrategy(betterproto.Enum): - DEFAULT = 0 - FILTERED = 1 - HUFFMAN = 2 - RLE = 3 - - -class GzipCompressionLevelEnum(betterproto.Enum): - DEFAULT = 0 - BEST = 1 - SPEED = 2 - - -@dataclass(eq=False, repr=False) -class Gzip(betterproto.Message): - """[#next-free-field: 12]""" - - # Value from 1 to 9 that controls the amount of internal memory used by zlib. - # Higher values use more memory, but are faster and produce better - # compression results. The default value is 5. - memory_level: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - # A value used for selecting the zlib compression level. This setting will - # affect speed and amount of compression applied to the content. "BEST" - # provides higher compression at the cost of higher latency, "SPEED" provides - # lower compression with minimum impact on response time. "DEFAULT" provides - # an optimal result between speed and compression. This field will be set to - # "DEFAULT" if not specified. - compression_level: "GzipCompressionLevelEnum" = betterproto.enum_field(3) - # A value used for selecting the zlib compression strategy which is directly - # related to the characteristics of the content. Most of the time "DEFAULT" - # will be the best choice, though there are situations which changing this - # parameter might produce better results. For example, run-length encoding - # (RLE) is typically used when the content is known for having sequences - # which same data occurs many consecutive times. For more information about - # each strategy, please refer to zlib manual. - compression_strategy: "GzipCompressionStrategy" = betterproto.enum_field(4) - # Value from 9 to 15 that represents the base two logarithmic of the - # compressor's window size. Larger window results in better compression at - # the expense of memory usage. The default is 12 which will produce a 4096 - # bytes window. For more details about this parameter, please refer to zlib - # manual > deflateInit2. - window_bits: Optional[int] = betterproto.message_field( - 9, wraps=betterproto.TYPE_UINT32 - ) - # Set of configuration parameters common for all compression filters. You can - # define `content_length`, `content_type` and other parameters in this field. - compressor: "__compressor_v3__.Compressor" = betterproto.message_field(10) - # Value for Zlib's next output buffer. If not set, defaults to 4096. See - # https://www.zlib.net/manual.html for more details. Also see - # https://github.com/envoyproxy/envoy/issues/8448 for context on this - # filter's performance. 
- chunk_size: Optional[int] = betterproto.message_field( - 11, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class GzipCompressionLevel(betterproto.Message): - pass - - -from ...compressor import v3 as __compressor_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/header_to_metadata/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/header_to_metadata/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/header_to_metadata/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/header_to_metadata/v3/__init__.py deleted file mode 100644 index a5d17d6..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/header_to_metadata/v3/__init__.py +++ /dev/null @@ -1,86 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class ConfigValueType(betterproto.Enum): - STRING = 0 - NUMBER = 1 - PROTOBUF_VALUE = 2 - - -class ConfigValueEncode(betterproto.Enum): - NONE = 0 - BASE64 = 1 - - -@dataclass(eq=False, repr=False) -class Config(betterproto.Message): - # The list of rules to apply to requests. - request_rules: List["ConfigRule"] = betterproto.message_field(1) - # The list of rules to apply to responses. - response_rules: List["ConfigRule"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ConfigKeyValuePair(betterproto.Message): - """[#next-free-field: 7]""" - - # The namespace — if this is empty, the filter's namespace will be used. - metadata_namespace: str = betterproto.string_field(1) - # The key to use within the namespace. - key: str = betterproto.string_field(2) - # The value to pair with the given key. When used for a - # :ref:`on_header_present ` case, if value is non-empty - # it'll be used instead of the header value. If both are empty, no metadata - # is added. When used for a :ref:`on_header_missing ` - # case, a non-empty value must be provided otherwise no metadata is added. - value: str = betterproto.string_field(3) - # If present, the header's value will be matched and substituted with this. - # If there is no match or substitution, the header value is used as-is. This - # is only used for :ref:`on_header_present `. Note: if - # the `value` field is non-empty this field should be empty. - regex_value_rewrite: "_____type_matcher_v3__.RegexMatchAndSubstitute" = ( - betterproto.message_field(6) - ) - # The value's type — defaults to string. - type: "ConfigValueType" = betterproto.enum_field(4) - # How is the value encoded, default is NONE (not encoded). The value will be - # decoded accordingly before storing to metadata. - encode: "ConfigValueEncode" = betterproto.enum_field(5) - - -@dataclass(eq=False, repr=False) -class ConfigRule(betterproto.Message): - """ - A Rule defines what metadata to apply when a header is present or missing. - [#next-free-field: 6] - """ - - # Specifies that a match will be performed on the value of a header or a - # cookie. The header to be extracted. - header: str = betterproto.string_field(1) - # The cookie to be extracted. - cookie: str = betterproto.string_field(5) - # If the header or cookie is present, apply this metadata KeyValuePair. 
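# --- Illustrative usage sketch (not part of the generated sources in this diff) ---
# The zlib tuning knobs of the Gzip message above are wrapped scalars and enums,
# so the config can be built directly; the values here are examples only. The
# shared `compressor` sub-message (from the compressor.v3 module imported above)
# is left unset for brevity.
from envoy_data_plane.envoy.extensions.filters.http.gzip.v3 import (
    Gzip,
    GzipCompressionLevelEnum,
    GzipCompressionStrategy,
)

gzip_config = Gzip(
    memory_level=5,                                    # zlib memory level, 1-9
    compression_level=GzipCompressionLevelEnum.BEST,   # favour ratio over latency
    compression_strategy=GzipCompressionStrategy.RLE,  # e.g. for run-heavy payloads
    window_bits=12,                                    # 4096-byte window (the default)
    chunk_size=4096,                                   # zlib output buffer size
)
# --- end of sketch ---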
If - # the value in the KeyValuePair is non-empty, it'll be used instead of the - # header or cookie value. - on_header_present: "ConfigKeyValuePair" = betterproto.message_field(2) - # If the header or cookie is not present, apply this metadata KeyValuePair. - # The value in the KeyValuePair must be set, since it'll be used in lieu of - # the missing header or cookie value. - on_header_missing: "ConfigKeyValuePair" = betterproto.message_field(3) - # Whether or not to remove the header after a rule is applied. This prevents - # headers from leaking. This field is not supported in case of a cookie. - remove: bool = betterproto.bool_field(4) - - -from ......type.matcher import v3 as _____type_matcher_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/health_check/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/health_check/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/health_check/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/health_check/v3/__init__.py deleted file mode 100644 index f8efce3..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/health_check/v3/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/health_check/v3/health_check.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import Dict, List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class HealthCheck(betterproto.Message): - """[#next-free-field: 6]""" - - # Specifies whether the filter operates in pass through mode or not. - pass_through_mode: Optional[bool] = betterproto.message_field( - 1, wraps=betterproto.TYPE_BOOL - ) - # If operating in pass through mode, the amount of time in milliseconds that - # the filter should cache the upstream response. - cache_time: timedelta = betterproto.message_field(3) - # If operating in non-pass-through mode, specifies a set of upstream cluster - # names and the minimum percentage of servers in each of those clusters that - # must be healthy or degraded in order for the filter to return a 200. .. - # note:: This value is interpreted as an integer by truncating, so 12.50% - # will be calculated as if it were 12%. - cluster_min_healthy_percentages: Dict[ - str, "_____type_v3__.Percent" - ] = betterproto.map_field(4, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # Specifies a set of health check request headers to match on. The health - # check filter will check a request’s headers against all the specified - # headers. To specify the health check endpoint, set the ``:path`` header to - # match on. 
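# --- Illustrative usage sketch (not part of the generated sources in this diff) ---
# One request rule for the header_to_metadata Config above: copy an x-version
# header into dynamic metadata and remove the header afterwards. Names and
# values are illustrative.
from envoy_data_plane.envoy.extensions.filters.http.header_to_metadata.v3 import (
    Config,
    ConfigKeyValuePair,
    ConfigRule,
    ConfigValueType,
)

h2m = Config(
    request_rules=[
        ConfigRule(
            header="x-version",
            on_header_present=ConfigKeyValuePair(
                metadata_namespace="envoy.lb",  # empty would mean "this filter's namespace"
                key="version",
                type=ConfigValueType.STRING,
            ),
            remove=True,  # strip the header once the metadata is set
        )
    ]
)
# --- end of sketch ---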
- headers: List["_____config_route_v3__.HeaderMatcher"] = betterproto.message_field(5) - - -from ......config.route import v3 as _____config_route_v3__ -from ......type import v3 as _____type_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/ip_tagging/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/ip_tagging/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/ip_tagging/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/ip_tagging/v3/__init__.py deleted file mode 100644 index b29a40a..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/ip_tagging/v3/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class IpTaggingRequestType(betterproto.Enum): - BOTH = 0 - INTERNAL = 1 - EXTERNAL = 2 - - -@dataclass(eq=False, repr=False) -class IpTagging(betterproto.Message): - # The type of request the filter should apply to. - request_type: "IpTaggingRequestType" = betterproto.enum_field(1) - # [#comment:TODO(ccaraman): Extend functionality to load IP tags from file - # system. Tracked by issue https://github.com/envoyproxy/envoy/issues/2695] - # The set of IP tags for the filter. - ip_tags: List["IpTaggingIpTag"] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class IpTaggingIpTag(betterproto.Message): - """Supplies the IP tag name and the IP address subnets.""" - - # Specifies the IP tag name to apply. - ip_tag_name: str = betterproto.string_field(1) - # A list of IP address subnets that will be tagged with ip_tag_name. Both - # IPv4 and IPv6 are supported. - ip_list: List["_____config_core_v3__.CidrRange"] = betterproto.message_field(2) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/jwt_authn/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/jwt_authn/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/jwt_authn/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/jwt_authn/v3/__init__.py deleted file mode 100644 index 43f2334..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/jwt_authn/v3/__init__.py +++ /dev/null @@ -1,440 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/jwt_authn/v3/config.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import Dict, List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class JwtProvider(betterproto.Message): - """ - Please see following for JWT authentication flow: * `JSON Web Token (JWT) - `_ * `The OAuth 2.0 Authorization - Framework `_ * `OpenID Connect - `_ A JwtProvider message specifies how a JSON - Web Token (JWT) can be verified. It specifies: * issuer: the principal that - issues the JWT. If specified, it has to match the *iss* field in JWT. * - allowed audiences: the ones in the token have to be listed here. * how to - fetch public key JWKS to verify the token signature. * how to extract JWT - token in the request. 
* how to pass successfully verified token payload. - Example: .. code-block:: yaml issuer: https://example.com - audiences: - bookstore_android.apps.googleusercontent.com - - bookstore_web.apps.googleusercontent.com remote_jwks: http_uri: - uri: https://example.com/.well-known/jwks.json cluster: - example_jwks_cluster timeout: 1s cache_duration: - seconds: 300 [#next-free-field: 15] - """ - - # Specify the `principal - # `_ that issued the JWT, - # usually a URL or an email address. It is optional. If specified, it has to - # match the *iss* field in JWT. If a JWT has *iss* field and this field is - # specified, they have to match, otherwise the JWT *iss* field is not - # checked. Note: *JwtRequirement* :ref:`allow_missing ` and - # :ref:`allow_missing_or_failed ` are implemented - # differently than other *JwtRequirements*. Hence the usage of this field is - # different as follows if *allow_missing* or *allow_missing_or_failed* is - # used: * If a JWT has *iss* field, it needs to be specified by this field in - # one of *JwtProviders*. * If a JWT doesn't have *iss* field, one of - # *JwtProviders* should fill this field empty. * Multiple *JwtProviders* - # should not have same value in this field. Example: - # https://securetoken.google.com Example: - # 1234567-compute@developer.gserviceaccount.com - issuer: str = betterproto.string_field(1) - # The list of JWT `audiences - # `_ are allowed to - # access. A JWT containing any of these audiences will be accepted. If not - # specified, will not check audiences in the token. Example: .. code-block:: - # yaml audiences: - bookstore_android.apps.googleusercontent.com - # - bookstore_web.apps.googleusercontent.com - audiences: List[str] = betterproto.string_field(2) - # JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies - # the remote HTTP URI and how the fetched JWKS should be cached. Example: .. - # code-block:: yaml remote_jwks: http_uri: uri: - # https://www.googleapis.com/oauth2/v1/certs cluster: - # jwt.www.googleapis.com|443 timeout: 1s cache_duration: - # seconds: 300 - remote_jwks: "RemoteJwks" = betterproto.message_field( - 3, group="jwks_source_specifier" - ) - # JWKS is in local data source. It could be either in a local file or - # embedded in the inline_string. Example: local file .. code-block:: yaml - # local_jwks: filename: /etc/envoy/jwks/jwks1.txt Example: inline_string - # .. code-block:: yaml local_jwks: inline_string: ACADADADADA - local_jwks: "_____config_core_v3__.DataSource" = betterproto.message_field( - 4, group="jwks_source_specifier" - ) - # If false, the JWT is removed in the request after a success verification. - # If true, the JWT is not removed in the request. Default value is false. - # caveat: only works for from_header & has no effect for JWTs extracted - # through from_params & from_cookies. - forward: bool = betterproto.bool_field(5) - # Two fields below define where to extract the JWT from an HTTP request. If - # no explicit location is specified, the following default locations are - # tried in order: 1. The Authorization header using the `Bearer schema - # `_. Example:: - # Authorization: Bearer . 2. `access_token - # `_ query parameter. - # Multiple JWTs can be verified for a request. Each JWT has to be extracted - # from the locations its provider specified or from the default locations. - # Specify the HTTP headers to extract JWT token. For examples, following - # config: .. 
code-block:: yaml from_headers: - name: x-goog-iap-jwt- - # assertion can be used to extract token from header:: ``x-goog-iap-jwt- - # assertion: ``. - from_headers: List["JwtHeader"] = betterproto.message_field(6) - # JWT is sent in a query parameter. `jwt_params` represents the query - # parameter names. For example, if config is: .. code-block:: yaml - # from_params: - jwt_token The JWT format in query parameter is:: - # /path?jwt_token= - from_params: List[str] = betterproto.string_field(7) - # JWT is sent in a cookie. `from_cookies` represents the cookie names to - # extract from. For example, if config is: .. code-block:: yaml - # from_cookies: - auth-token Then JWT will be extracted from `auth-token` - # cookie in the request. - from_cookies: List[str] = betterproto.string_field(13) - # This field specifies the header name to forward a successfully verified JWT - # payload to the backend. The forwarded data is:: - # base64url_encoded(jwt_payload_in_JSON) If it is not specified, the payload - # will not be forwarded. - forward_payload_header: str = betterproto.string_field(8) - # When :ref:`forward_payload_header ` is specified, the - # base64 encoded payload will be added to the headers. Normally JWT based64 - # encode doesn't add padding. If this field is true, the header will be - # padded. This field is only relevant if :ref:`forward_payload_header ` is specified. - pad_forward_payload_header: bool = betterproto.bool_field(11) - # If non empty, successfully verified JWT payloads will be written to - # StreamInfo DynamicMetadata in the format as: *namespace* is the jwt_authn - # filter name as **envoy.filters.http.jwt_authn** The value is the - # *protobuf::Struct*. The value of this field will be the key for its - # *fields* and the value is the *protobuf::Struct* converted from JWT JSON - # payload. For example, if payload_in_metadata is *my_payload*: .. code- - # block:: yaml envoy.filters.http.jwt_authn: my_payload: iss: - # https://example.com sub: test@example.com aud: - # https://example.com exp: 1501281058 - payload_in_metadata: str = betterproto.string_field(9) - # If not empty, similar to :ref:`payload_in_metadata `, a - # successfully verified JWT header will be written to :ref:`Dynamic State - # ` as an entry - # (``protobuf::Struct``) in **envoy.filters.http.jwt_authn** *namespace* with - # the value of this field as the key. For example, if ``header_in_metadata`` - # is *my_header*: .. code-block:: yaml envoy.filters.http.jwt_authn: - # my_header: alg: JWT kid: EF71iSaosbC5C4tC6Syq1Gm647M alg: - # PS256 When the metadata has **envoy.filters.http.jwt_authn** entry already - # (for example if :ref:`payload_in_metadata ` is not empty), it - # will be inserted as a new entry in the same *namespace* as shown below: .. - # code-block:: yaml envoy.filters.http.jwt_authn: my_payload: - # iss: https://example.com sub: test@example.com aud: - # https://example.com exp: 1501281058 my_header: alg: JWT - # kid: EF71iSaosbC5C4tC6Syq1Gm647M alg: PS256 .. warning:: Using the - # same key name for :ref:`header_in_metadata ` and - # :ref:`payload_in_metadata ` is not suggested due to - # potential override of existing entry, while it is not enforced during - # config validation. - header_in_metadata: str = betterproto.string_field(14) - # Specify the clock skew in seconds when verifying JWT time constraint, such - # as `exp`, and `nbf`. If not specified, default is 60 seconds. 
- clock_skew_seconds: int = betterproto.uint32_field(10) - # Enables JWT cache, its size is specified by *jwt_cache_size*. Only valid - # JWT tokens are cached. - jwt_cache_config: "JwtCacheConfig" = betterproto.message_field(12) - - -@dataclass(eq=False, repr=False) -class JwtCacheConfig(betterproto.Message): - """This message specifies JWT Cache configuration.""" - - # The unit is number of JWT tokens, default to 100. - jwt_cache_size: int = betterproto.uint32_field(1) - - -@dataclass(eq=False, repr=False) -class RemoteJwks(betterproto.Message): - """ - This message specifies how to fetch JWKS from remote and how to cache it. - """ - - # The HTTP URI to fetch the JWKS. For example: .. code-block:: yaml - # http_uri: uri: https://www.googleapis.com/oauth2/v1/certs - # cluster: jwt.www.googleapis.com|443 timeout: 1s - http_uri: "_____config_core_v3__.HttpUri" = betterproto.message_field(1) - # Duration after which the cached JWKS should be expired. If not specified, - # default cache duration is 5 minutes. - cache_duration: timedelta = betterproto.message_field(2) - # Fetch Jwks asynchronously in the main thread before the listener is - # activated. Fetched Jwks can be used by all worker threads. If this feature - # is not enabled: * The Jwks is fetched on-demand when the requests come. - # During the fetching, first few requests are paused until the Jwks is - # fetched. * Each worker thread fetches its own Jwks since Jwks cache is per - # worker thread. If this feature is enabled: * Fetched Jwks is done in the - # main thread before the listener is activated. Its fetched Jwks can be - # used by all worker threads. Each worker thread doesn't need to fetch its - # own. * Jwks is ready when the requests come, not need to wait for the Jwks - # fetching. - async_fetch: "JwksAsyncFetch" = betterproto.message_field(3) - # Retry policy for fetching Jwks. optional. turned off by default. For - # example: .. code-block:: yaml retry_policy: retry_back_off: - # base_interval: 0.01s max_interval: 20s num_retries: 10 will yield - # a randomized truncated exponential backoff policy with an initial delay of - # 10ms 10 maximum attempts spaced at most 20s seconds. .. code-block:: yaml - # retry_policy: num_retries:1 uses the default :ref:`retry backoff - # strategy `. with the - # default base interval is 1000 milliseconds. and the default maximum - # interval of 10 times the base interval. if num_retries is omitted, the - # default is to allow only one retry. If enabled, the retry policy will apply - # to all Jwks fetching approaches, e.g. on demand or asynchronously in - # background. - retry_policy: "_____config_core_v3__.RetryPolicy" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class JwksAsyncFetch(betterproto.Message): - """ - Fetch Jwks asynchronously in the main thread when the filter config is - parsed. The listener is activated only after the Jwks is fetched. When the - Jwks is expired in the cache, it is fetched again in the main thread. The - fetched Jwks from the main thread can be used by all worker threads. - """ - - # If false, the listener is activated after the initial fetch is completed. - # The initial fetch result can be either successful or failed. If true, it is - # activated without waiting for the initial fetch to complete. Default is - # false. 
- fast_listener: bool = betterproto.bool_field(1) - - -@dataclass(eq=False, repr=False) -class JwtHeader(betterproto.Message): - """This message specifies a header location to extract JWT token.""" - - # The HTTP header name. - name: str = betterproto.string_field(1) - # The value prefix. The value format is "value_prefix" For example, - # for "Authorization: Bearer ", value_prefix="Bearer " with a space at - # the end. - value_prefix: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class ProviderWithAudiences(betterproto.Message): - """Specify a required provider with audiences.""" - - # Specify a required provider name. - provider_name: str = betterproto.string_field(1) - # This field overrides the one specified in the JwtProvider. - audiences: List[str] = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class JwtRequirement(betterproto.Message): - """ - This message specifies a Jwt requirement. An empty message means JWT - verification is not required. Here are some config examples: .. code- - block:: yaml # Example 1: not required with an empty message # Example 2: - require A provider_name: provider-A # Example 3: require A or B - requires_any: requirements: - provider_name: provider-A - - provider_name: provider-B # Example 4: require A and B requires_all: - requirements: - provider_name: provider-A - provider_name: - provider-B # Example 5: require A and (B or C) requires_all: - requirements: - provider_name: provider-A - requires_any: - requirements: - provider_name: provider-B - - provider_name: provider-C # Example 6: require A or (B and C) - requires_any: requirements: - provider_name: provider-A - - requires_all: requirements: - provider_name: provider-B - - provider_name: provider-C # Example 7: A is optional (if token from A is - provided, it must be valid, but also allows missing token.) requires_any: - requirements: - provider_name: provider-A - allow_missing: {} # - Example 8: A is optional and B is required. requires_all: requirements: - - requires_any: requirements: - provider_name: provider-A - - allow_missing: {} - provider_name: provider-B [#next-free-field: 7] - """ - - # Specify a required provider name. - provider_name: str = betterproto.string_field(1, group="requires_type") - # Specify a required provider with audiences. - provider_and_audiences: "ProviderWithAudiences" = betterproto.message_field( - 2, group="requires_type" - ) - # Specify list of JwtRequirement. Their results are OR-ed. If any one of them - # passes, the result is passed. - requires_any: "JwtRequirementOrList" = betterproto.message_field( - 3, group="requires_type" - ) - # Specify list of JwtRequirement. Their results are AND-ed. All of them must - # pass, if one of them fails or missing, it fails. - requires_all: "JwtRequirementAndList" = betterproto.message_field( - 4, group="requires_type" - ) - # The requirement is always satisfied even if JWT is missing or the JWT - # verification fails. A typical usage is: this filter is used to only verify - # JWTs and pass the verified JWT payloads to another filter, the other filter - # will make decision. In this mode, all JWT tokens will be verified. - allow_missing_or_failed: "betterproto_lib_google_protobuf.Empty" = ( - betterproto.message_field(5, group="requires_type") - ) - # The requirement is satisfied if JWT is missing, but failed if JWT is - # presented but invalid. Similar to allow_missing_or_failed, this is used to - # only verify JWTs and pass the verified payload to another filter. 
The - # different is this mode will reject requests with invalid tokens. - allow_missing: "betterproto_lib_google_protobuf.Empty" = betterproto.message_field( - 6, group="requires_type" - ) - - -@dataclass(eq=False, repr=False) -class JwtRequirementOrList(betterproto.Message): - """ - This message specifies a list of RequiredProvider. Their results are OR-ed; - if any one of them passes, the result is passed - """ - - # Specify a list of JwtRequirement. - requirements: List["JwtRequirement"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class JwtRequirementAndList(betterproto.Message): - """ - This message specifies a list of RequiredProvider. Their results are AND- - ed; all of them must pass, if one of them fails or missing, it fails. - """ - - # Specify a list of JwtRequirement. - requirements: List["JwtRequirement"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class RequirementRule(betterproto.Message): - """ - This message specifies a Jwt requirement for a specific Route condition. - Example 1: .. code-block:: yaml - match: prefix: /healthz In - above example, "requires" field is empty for /healthz prefix match, it - means that requests matching the path prefix don't require JWT - authentication. Example 2: .. code-block:: yaml - match: prefix: - / requires: { provider_name: provider-A } In above example, all - requests matched the path prefix require jwt authentication from - "provider-A". - """ - - # The route matching parameter. Only when the match is satisfied, the - # "requires" field will apply. For example: following match will match all - # requests. .. code-block:: yaml match: prefix: / - match: "_____config_route_v3__.RouteMatch" = betterproto.message_field(1) - # Specify a Jwt requirement. Please see detail comment in message - # JwtRequirement. - requires: "JwtRequirement" = betterproto.message_field(2, group="requirement_type") - # Use requirement_name to specify a Jwt requirement. This requirement_name - # MUST be specified at the :ref:`requirement_map ` in - # `JwtAuthentication`. - requirement_name: str = betterproto.string_field(3, group="requirement_type") - - -@dataclass(eq=False, repr=False) -class FilterStateRule(betterproto.Message): - """ - This message specifies Jwt requirements based on stream_info.filterState. - This FilterState should use `Router::StringAccessor` object to set a string - value. Other HTTP filters can use it to specify Jwt requirements - dynamically. Example: .. code-block:: yaml name: jwt_selector - requires: issuer_1: provider_name: issuer1 issuer_2: - provider_name: issuer2 If a filter set "jwt_selector" with "issuer_1" to - FilterState for a request, jwt_authn filter will use - JwtRequirement{"provider_name": "issuer1"} to verify. - """ - - # The filter state name to retrieve the `Router::StringAccessor` object. - name: str = betterproto.string_field(1) - # A map of string keys to requirements. The string key is the string value in - # the FilterState with the name specified in the *name* field above. - requires: Dict[str, "JwtRequirement"] = betterproto.map_field( - 3, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - - -@dataclass(eq=False, repr=False) -class JwtAuthentication(betterproto.Message): - """ - This is the Envoy HTTP filter config for JWT authentication. For example: - .. 
code-block:: yaml providers: provider1: issuer: issuer1 - audiences: - audience1 - audience2 remote_jwks: - http_uri: uri: https://example.com/.well-known/jwks.json - cluster: example_jwks_cluster timeout: 1s provider2: - issuer: issuer2 local_jwks: inline_string: jwks_string - rules: # Not jwt verification is required for /health path - - match: prefix: /health # Jwt verification for provider1 is - required for path prefixed with "prefix" - match: prefix: - /prefix requires: provider_name: provider1 # Jwt - verification for either provider1 or provider2 is required for all other - requests. - match: prefix: / requires: - requires_any: requirements: - provider_name: - provider1 - provider_name: provider2 [#next-free-field: 6] - """ - - # Map of provider names to JwtProviders. .. code-block:: yaml providers: - # provider1: issuer: issuer1 audiences: - audience1 - # - audience2 remote_jwks: http_uri: uri: - # https://example.com/.well-known/jwks.json cluster: - # example_jwks_cluster timeout: 1s provider2: issuer: - # provider2 local_jwks: inline_string: jwks_string - providers: Dict[str, "JwtProvider"] = betterproto.map_field( - 1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - # Specifies requirements based on the route matches. The first matched - # requirement will be applied. If there are overlapped match conditions, - # please put the most specific match first. Examples .. code-block:: yaml - # rules: - match: prefix: /healthz - match: prefix: - # /baz requires: provider_name: provider1 - match: - # prefix: /foo requires: requires_any: requirements: - # - provider_name: provider1 - provider_name: provider2 - - # match: prefix: /bar requires: requires_all: - # requirements: - provider_name: provider1 - - # provider_name: provider2 - rules: List["RequirementRule"] = betterproto.message_field(2) - # This message specifies Jwt requirements based on stream_info.filterState. - # Other HTTP filters can use it to specify Jwt requirements dynamically. The - # *rules* field above is checked first, if it could not find any matches, - # check this one. - filter_state_rules: "FilterStateRule" = betterproto.message_field(3) - # When set to true, bypass the `CORS preflight request - # `_ - # regardless of JWT requirements specified in the rules. - bypass_cors_preflight: bool = betterproto.bool_field(4) - # A map of unique requirement_names to JwtRequirements. - # :ref:`requirement_name ` in `PerRouteConfig` uses this map to - # specify a JwtRequirement. - requirement_map: Dict[str, "JwtRequirement"] = betterproto.map_field( - 5, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - - -@dataclass(eq=False, repr=False) -class PerRouteConfig(betterproto.Message): - """Specify per-route config.""" - - # Disable Jwt Authentication for this route. - disabled: bool = betterproto.bool_field(1, group="requirement_specifier") - # Use requirement_name to specify a JwtRequirement. This requirement_name - # MUST be specified at the :ref:`requirement_map ` in - # `JwtAuthentication`. If no, the requests using this route will be rejected - # with 403. 
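# --- Illustrative usage sketch (not part of the generated sources in this diff) ---
# The YAML example from the JwtAuthentication docstring above, rebuilt with the
# betterproto dataclasses. HttpUri and RouteMatch come from the config.core.v3
# and config.route.v3 modules imported at the bottom of this generated file;
# the field values are the ones used in the docstring.
from datetime import timedelta

from envoy_data_plane.envoy.config.core.v3 import HttpUri
from envoy_data_plane.envoy.config.route.v3 import RouteMatch
from envoy_data_plane.envoy.extensions.filters.http.jwt_authn.v3 import (
    JwtAuthentication,
    JwtProvider,
    JwtRequirement,
    RemoteJwks,
    RequirementRule,
)

provider1 = JwtProvider(
    issuer="issuer1",
    audiences=["audience1", "audience2"],
    remote_jwks=RemoteJwks(
        http_uri=HttpUri(
            uri="https://example.com/.well-known/jwks.json",
            cluster="example_jwks_cluster",
            timeout=timedelta(seconds=1),
        ),
        cache_duration=timedelta(seconds=300),
    ),
)

jwt_authn = JwtAuthentication(
    providers={"provider1": provider1},
    rules=[
        RequirementRule(match=RouteMatch(prefix="/health")),  # no JWT required here
        RequirementRule(
            match=RouteMatch(prefix="/prefix"),
            requires=JwtRequirement(provider_name="provider1"),
        ),
    ],
)
# --- end of sketch ---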
- requirement_name: str = betterproto.string_field(2, group="requirement_specifier") - - -from ......config.core import v3 as _____config_core_v3__ -from ......config.route import v3 as _____config_route_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/kill_request/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/kill_request/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/kill_request/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/kill_request/v3/__init__.py deleted file mode 100644 index 7e9b2b4..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/kill_request/v3/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/kill_request/v3/kill_request.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class KillRequestDirection(betterproto.Enum): - REQUEST = 0 - RESPONSE = 1 - - -@dataclass(eq=False, repr=False) -class KillRequest(betterproto.Message): - """Configuration for KillRequest filter.""" - - # The probability that a Kill request will be triggered. - probability: "_____type_v3__.FractionalPercent" = betterproto.message_field(1) - # The name of the kill request header. If this field is not empty, it will - # override the :ref:`default header - # ` name. Otherwise the default - # header name will be used. - kill_request_header: str = betterproto.string_field(2) - direction: "KillRequestDirection" = betterproto.enum_field(3) - - -from ......type import v3 as _____type_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/local_ratelimit/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/local_ratelimit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/local_ratelimit/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/local_ratelimit/v3/__init__.py deleted file mode 100644 index 118d81d..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/local_ratelimit/v3/__init__.py +++ /dev/null @@ -1,87 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class LocalRateLimit(betterproto.Message): - """[#next-free-field: 12]""" - - # The human readable prefix to use when emitting stats. - stat_prefix: str = betterproto.string_field(1) - # This field allows for a custom HTTP response status code to the downstream - # client when the request has been rate limited. Defaults to 429 - # (TooManyRequests). .. note:: If this is set to < 400, 429 will be used - # instead. - status: "_____type_v3__.HttpStatus" = betterproto.message_field(2) - # The token bucket configuration to use for rate limiting requests that are - # processed by this filter. Each request processed by the filter consumes a - # single token. If the token is available, the request will be allowed. If no - # tokens are available, the request will receive the configured rate limit - # status. .. 
note:: It's fine for the token bucket to be unset for the - # global configuration since the rate limit can be applied at a the virtual - # host or route level. Thus, the token bucket must be set for the per route - # configuration otherwise the config will be rejected. .. note:: When using - # per route configuration, the bucket becomes unique to that route. .. note:: - # In the current implementation the token bucket's :ref:`fill_interval - # ` must be >= 50ms to - # avoid too aggressive refills. - token_bucket: "_____type_v3__.TokenBucket" = betterproto.message_field(3) - # If set, this will enable -- but not necessarily enforce -- the rate limit - # for the given fraction of requests. Defaults to 0% of requests for safety. - filter_enabled: "_____config_core_v3__.RuntimeFractionalPercent" = ( - betterproto.message_field(4) - ) - # If set, this will enforce the rate limit decisions for the given fraction - # of requests. Note: this only applies to the fraction of enabled requests. - # Defaults to 0% of requests for safety. - filter_enforced: "_____config_core_v3__.RuntimeFractionalPercent" = ( - betterproto.message_field(5) - ) - # Specifies a list of HTTP headers that should be added to each request that - # has been rate limited and is also forwarded upstream. This can only occur - # when the filter is enabled but not enforced. - request_headers_to_add_when_not_enforced: List[ - "_____config_core_v3__.HeaderValueOption" - ] = betterproto.message_field(10) - # Specifies a list of HTTP headers that should be added to each response for - # requests that have been rate limited. This occurs when the filter is either - # enabled or fully enforced. - response_headers_to_add: List[ - "_____config_core_v3__.HeaderValueOption" - ] = betterproto.message_field(6) - # The rate limit descriptor list to use in the local rate limit to override - # on. The rate limit descriptor is selected by the first full match from the - # request descriptors. Example on how to use ::ref:`this - # ` .. note:: In the - # current implementation the descriptor's token bucket :ref:`fill_interval - # ` must be a multiple - # global :ref:`token bucket's` fill interval. The - # descriptors must match verbatim for rate limiting to apply. There is no - # partial match by a subset of descriptor entries in the current - # implementation. - descriptors: List[ - "____common_ratelimit_v3__.LocalRateLimitDescriptor" - ] = betterproto.message_field(8) - # Specifies the rate limit configurations to be applied with the same stage - # number. If not set, the default stage number is 0. .. note:: The filter - # supports a range of 0 - 10 inclusively for stage numbers. - stage: int = betterproto.uint32_field(9) - # Specifies the scope of the rate limiter's token bucket. If set to false, - # the token bucket is shared across all worker threads, thus the rate limits - # are applied per Envoy process. If set to true, a token bucket is allocated - # for each connection. Thus the rate limits are applied per connection - # thereby allowing one to rate limit requests on a per connection basis. If - # unspecified, the default value is false. 
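# --- Illustrative usage sketch (not part of the generated sources in this diff) ---
# A per-route local rate limit of 10 requests/s, enabled and enforced for all
# traffic. TokenBucket and RuntimeFractionalPercent/FractionalPercent come from
# the type.v3 and config.core.v3 modules imported at the bottom of this file;
# their field names (max_tokens, tokens_per_fill, fill_interval, numerator,
# default_value) are assumed from the Envoy API rather than shown in this diff.
from datetime import timedelta

from envoy_data_plane.envoy.config.core.v3 import RuntimeFractionalPercent
from envoy_data_plane.envoy.extensions.filters.http.local_ratelimit.v3 import LocalRateLimit
from envoy_data_plane.envoy.type.v3 import FractionalPercent, TokenBucket

always = RuntimeFractionalPercent(default_value=FractionalPercent(numerator=100))

local_limit = LocalRateLimit(
    stat_prefix="route_rl",
    token_bucket=TokenBucket(
        max_tokens=10,
        tokens_per_fill=10,
        fill_interval=timedelta(seconds=1),  # must be >= 50ms per the note above
    ),
    filter_enabled=always,   # sample 100% of requests
    filter_enforced=always,  # and actually enforce the decision
)
# --- end of sketch ---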
- local_rate_limit_per_downstream_connection: bool = betterproto.bool_field(11) - - -from ......config.core import v3 as _____config_core_v3__ -from ......type import v3 as _____type_v3__ -from .....common.ratelimit import v3 as ____common_ratelimit_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/lua/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/lua/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/lua/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/lua/v3/__init__.py deleted file mode 100644 index 7acf2fe..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/lua/v3/__init__.py +++ /dev/null @@ -1,46 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/lua/v3/lua.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Dict - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Lua(betterproto.Message): - # The Lua code that Envoy will execute. This can be a very small script that - # further loads code from disk if desired. Note that if JSON configuration is - # used, the code must be properly escaped. YAML configuration may be easier - # to read since YAML supports multi-line strings so complex scripts can be - # easily expressed inline in the configuration. - inline_code: str = betterproto.string_field(1) - # Map of named Lua source codes that can be referenced in :ref:`LuaPerRoute - # `. The Lua - # source codes can be loaded from inline string or local files. Example: .. - # code-block:: yaml source_codes: hello.lua: inline_string: | - # function envoy_on_response(response_handle) -- Do something. - # end world.lua: filename: /etc/lua/world.lua - source_codes: Dict[str, "_____config_core_v3__.DataSource"] = betterproto.map_field( - 2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - - -@dataclass(eq=False, repr=False) -class LuaPerRoute(betterproto.Message): - # Disable the Lua filter for this particular vhost or route. If disabled is - # specified in multiple per-filter-configs, the most specific one will be - # used. - disabled: bool = betterproto.bool_field(1, group="override") - # A name of a Lua source code stored in :ref:`Lua.source_codes - # `. - name: str = betterproto.string_field(2, group="override") - # A configured per-route Lua source code that can be served by RDS or - # provided inline. - source_code: "_____config_core_v3__.DataSource" = betterproto.message_field( - 3, group="override" - ) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/oauth2/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/oauth2/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/oauth2/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/oauth2/v3/__init__.py deleted file mode 100644 index c884fdb..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/oauth2/v3/__init__.py +++ /dev/null @@ -1,97 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/extensions/filters/http/oauth2/v3/oauth.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class OAuth2Credentials(betterproto.Message): - # The client_id to be used in the authorize calls. This value will be URL - # encoded when sent to the OAuth server. - client_id: str = betterproto.string_field(1) - # The secret used to retrieve the access token. This value will be URL - # encoded when sent to the OAuth server. - token_secret: "____transport_sockets_tls_v3__.SdsSecretConfig" = ( - betterproto.message_field(2) - ) - # If present, the secret token will be a HMAC using the provided secret. - hmac_secret: "____transport_sockets_tls_v3__.SdsSecretConfig" = ( - betterproto.message_field(3, group="token_formation") - ) - # The cookie names used in OAuth filters flow. - cookie_names: "OAuth2CredentialsCookieNames" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class OAuth2CredentialsCookieNames(betterproto.Message): - # Cookie name to hold OAuth bearer token value. When the authentication - # server validates the client and returns an authorization token back to the - # OAuth filter, no matter what format that token is, if - # :ref:`forward_bearer_token ` is set to true the filter will - # send over the bearer token as a cookie with this name to the upstream. - # Defaults to ``BearerToken``. - bearer_token: str = betterproto.string_field(1) - # Cookie name to hold OAuth HMAC value. Defaults to ``OauthHMAC``. - oauth_hmac: str = betterproto.string_field(2) - # Cookie name to hold OAuth expiry value. Defaults to ``OauthExpires``. - oauth_expires: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class OAuth2Config(betterproto.Message): - """OAuth config [#next-free-field: 11]""" - - # Endpoint on the authorization server to retrieve the access token from. - token_endpoint: "_____config_core_v3__.HttpUri" = betterproto.message_field(1) - # The endpoint redirect to for authorization in response to unauthorized - # requests. - authorization_endpoint: str = betterproto.string_field(2) - # Credentials used for OAuth. - credentials: "OAuth2Credentials" = betterproto.message_field(3) - # The redirect URI passed to the authorization endpoint. Supports header - # formatting tokens. For more information, including details on header value - # syntax, see the documentation on :ref:`custom request headers - # `. This URI should not - # contain any query parameters. - redirect_uri: str = betterproto.string_field(4) - # Matching criteria used to determine whether a path appears to be the result - # of a redirect from the authorization server. - redirect_path_matcher: "_____type_matcher_v3__.PathMatcher" = ( - betterproto.message_field(5) - ) - # The path to sign a user out, clearing their credential cookies. - signout_path: "_____type_matcher_v3__.PathMatcher" = betterproto.message_field(6) - # Forward the OAuth token as a Bearer to upstream web service. - forward_bearer_token: bool = betterproto.bool_field(7) - # Any request that matches any of the provided matchers will be passed - # through without OAuth validation. - pass_through_matcher: List[ - "_____config_route_v3__.HeaderMatcher" - ] = betterproto.message_field(8) - # Optional list of OAuth scopes to be claimed in the authorization request. - # If not specified, defaults to "user" scope. 
OAuth RFC - # https://tools.ietf.org/html/rfc6749#section-3.3 - auth_scopes: List[str] = betterproto.string_field(9) - # Optional resource parameter for authorization request RFC: - # https://tools.ietf.org/html/rfc8707 - resources: List[str] = betterproto.string_field(10) - - -@dataclass(eq=False, repr=False) -class OAuth2(betterproto.Message): - """Filter config.""" - - # Leave this empty to disable OAuth2 for a specific route, using per filter - # config. - config: "OAuth2Config" = betterproto.message_field(1) - - -from ......config.core import v3 as _____config_core_v3__ -from ......config.route import v3 as _____config_route_v3__ -from ......type.matcher import v3 as _____type_matcher_v3__ -from .....transport_sockets.tls import v3 as ____transport_sockets_tls_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/on_demand/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/on_demand/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/on_demand/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/on_demand/v3/__init__.py deleted file mode 100644 index c418bb8..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/on_demand/v3/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/on_demand/v3/on_demand.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class OnDemand(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/original_src/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/original_src/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/original_src/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/original_src/v3/__init__.py deleted file mode 100644 index 344e4d3..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/original_src/v3/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/original_src/v3/original_src.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class OriginalSrc(betterproto.Message): - """ - The Original Src filter binds upstream connections to the original source - address determined for the request. This address could come from something - like the Proxy Protocol filter, or it could come from trusted http headers. - [#extension: envoy.filters.http.original_src] - """ - - # Sets the SO_MARK option on the upstream connection's socket to the provided - # value. Used to ensure that non-local addresses may be routed back through - # envoy when binding to the original source address. The option will not be - # applied if the mark is 0. 
- mark: int = betterproto.uint32_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/ratelimit/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/ratelimit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/ratelimit/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/ratelimit/v3/__init__.py deleted file mode 100644 index f88f459..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/ratelimit/v3/__init__.py +++ /dev/null @@ -1,328 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class RateLimitXRateLimitHeadersRfcVersion(betterproto.Enum): - OFF = 0 - DRAFT_VERSION_03 = 1 - - -class RateLimitConfigActionMetaDataSource(betterproto.Enum): - DYNAMIC = 0 - ROUTE_ENTRY = 1 - - -class RateLimitPerRouteVhRateLimitsOptions(betterproto.Enum): - OVERRIDE = 0 - INCLUDE = 1 - IGNORE = 2 - - -class RateLimitPerRouteOverrideOptions(betterproto.Enum): - DEFAULT = 0 - OVERRIDE_POLICY = 1 - INCLUDE_POLICY = 2 - IGNORE_POLICY = 3 - - -@dataclass(eq=False, repr=False) -class RateLimit(betterproto.Message): - """[#next-free-field: 10]""" - - # The rate limit domain to use when calling the rate limit service. - domain: str = betterproto.string_field(1) - # Specifies the rate limit configurations to be applied with the same stage - # number. If not set, the default stage number is 0. .. note:: The filter - # supports a range of 0 - 10 inclusively for stage numbers. - stage: int = betterproto.uint32_field(2) - # The type of requests the filter should apply to. The supported types are - # *internal*, *external* or *both*. A request is considered internal if - # :ref:`x-envoy-internal` is - # set to true. If :ref:`x-envoy-internal` is not set or false, a request is considered external. The - # filter defaults to *both*, and it will apply to all request types. - request_type: str = betterproto.string_field(3) - # The timeout in milliseconds for the rate limit service RPC. If not set, - # this defaults to 20ms. - timeout: timedelta = betterproto.message_field(4) - # The filter's behaviour in case the rate limiting service does not respond - # back. When it is set to true, Envoy will not allow traffic in case of - # communication failure between rate limiting service and the proxy. - failure_mode_deny: bool = betterproto.bool_field(5) - # Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead - # of the default `UNAVAILABLE` gRPC code for a rate limited gRPC call. The - # HTTP code will be 200 for a gRPC response. - rate_limited_as_resource_exhausted: bool = betterproto.bool_field(6) - # Configuration for an external rate limit service provider. If not - # specified, any calls to the rate limit service will immediately return - # success. - rate_limit_service: "_____config_ratelimit_v3__.RateLimitServiceConfig" = ( - betterproto.message_field(7) - ) - # Defines the standard version to use for X-RateLimit headers emitted by the - # filter: * ``X-RateLimit-Limit`` - indicates the request-quota associated to - # the client in the current time-window followed by the description of the - # quota policy. 
The values are returned by the rate limiting service in :re - # f:`current_limit` field. Example: `10, 10;w=1;name="per- - # ip", 1000;w=3600`. * ``X-RateLimit-Remaining`` - indicates the remaining - # requests in the current time-window. The values are returned by the rate - # limiting service in :ref:`limit_remaining` field. * - # ``X-RateLimit-Reset`` - indicates the number of seconds until reset of - # the current time-window. The values are returned by the rate limiting - # service in :ref:`duration_until_reset` field. In - # case rate limiting policy specifies more then one time window, the values - # above represent the window that is closest to reaching its limit. For more - # information about the headers specification see selected version of the - # `draft RFC `_. Disabled by default. - enable_x_ratelimit_headers: "RateLimitXRateLimitHeadersRfcVersion" = ( - betterproto.enum_field(8) - ) - # Disables emitting the :ref:`x-envoy- - # ratelimited` header in case - # of rate limiting (i.e. 429 responses). Having this header not present - # potentially makes the request retriable. - disable_x_envoy_ratelimited_header: bool = betterproto.bool_field(9) - - -@dataclass(eq=False, repr=False) -class RateLimitConfig(betterproto.Message): - """ - Global rate limiting :ref:`architecture overview - `. Also applies to Local rate limiting - :ref:`using descriptors - `. [#not-implemented- - hide:] - """ - - # Refers to the stage set in the filter. The rate limit configuration only - # applies to filters with the same stage number. The default stage number is - # 0. .. note:: The filter supports a range of 0 - 10 inclusively for stage - # numbers. - stage: int = betterproto.uint32_field(1) - # The key to be set in runtime to disable this rate limit configuration. - disable_key: str = betterproto.string_field(2) - # A list of actions that are to be applied for this rate limit configuration. - # Order matters as the actions are processed sequentially and the descriptor - # is composed by appending descriptor entries in that sequence. If an action - # cannot append a descriptor entry, no descriptor is generated for the - # configuration. See :ref:`composing actions - # ` for additional - # documentation. - actions: List["RateLimitConfigAction"] = betterproto.message_field(3) - # An optional limit override to be appended to the descriptor produced by - # this rate limit configuration. If the override value is invalid or cannot - # be resolved from metadata, no override is provided. See :ref:`rate limit - # override ` for more - # information. - limit: "RateLimitConfigOverride" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class RateLimitConfigAction(betterproto.Message): - """[#next-free-field: 10]""" - - # Rate limit on source cluster. - source_cluster: "RateLimitConfigActionSourceCluster" = betterproto.message_field( - 1, group="action_specifier" - ) - # Rate limit on destination cluster. - destination_cluster: "RateLimitConfigActionDestinationCluster" = ( - betterproto.message_field(2, group="action_specifier") - ) - # Rate limit on request headers. - request_headers: "RateLimitConfigActionRequestHeaders" = betterproto.message_field( - 3, group="action_specifier" - ) - # Rate limit on remote address. - remote_address: "RateLimitConfigActionRemoteAddress" = betterproto.message_field( - 4, group="action_specifier" - ) - # Rate limit on a generic key. 
- generic_key: "RateLimitConfigActionGenericKey" = betterproto.message_field( - 5, group="action_specifier" - ) - # Rate limit on the existence of request headers. - header_value_match: "RateLimitConfigActionHeaderValueMatch" = ( - betterproto.message_field(6, group="action_specifier") - ) - # Rate limit on metadata. - metadata: "RateLimitConfigActionMetaData" = betterproto.message_field( - 8, group="action_specifier" - ) - # Rate limit descriptor extension. See the rate limit descriptor extensions - # documentation. [#extension-category: envoy.rate_limit_descriptors] - extension: "_____config_core_v3__.TypedExtensionConfig" = betterproto.message_field( - 9, group="action_specifier" - ) - - -@dataclass(eq=False, repr=False) -class RateLimitConfigActionSourceCluster(betterproto.Message): - """ - The following descriptor entry is appended to the descriptor: .. code- - block:: cpp ("source_cluster", "") is derived from the :option:`--service-cluster` option. - """ - - pass - - -@dataclass(eq=False, repr=False) -class RateLimitConfigActionDestinationCluster(betterproto.Message): - """ - The following descriptor entry is appended to the descriptor: .. code- - block:: cpp ("destination_cluster", "") Once a - request matches against a route table rule, a routed cluster is determined - by one of the following :ref:`route table configuration - ` settings: * - :ref:`cluster ` - indicates the upstream cluster to route to. * :ref:`weighted_clusters - ` - chooses a cluster randomly from a set of clusters with attributed weight. * - :ref:`cluster_header - ` indicates - which header in the request contains the target cluster. - """ - - pass - - -@dataclass(eq=False, repr=False) -class RateLimitConfigActionRequestHeaders(betterproto.Message): - """ - The following descriptor entry is appended when a header contains a key - that matches the *header_name*: .. code-block:: cpp ("", - "") - """ - - # The header name to be queried from the request headers. The header’s value - # is used to populate the value of the descriptor entry for the - # descriptor_key. - header_name: str = betterproto.string_field(1) - # The key to use in the descriptor entry. - descriptor_key: str = betterproto.string_field(2) - # If set to true, Envoy skips the descriptor while calling rate limiting - # service when header is not present in the request. By default it skips - # calling the rate limiting service if this header is not present in the - # request. - skip_if_absent: bool = betterproto.bool_field(3) - - -@dataclass(eq=False, repr=False) -class RateLimitConfigActionRemoteAddress(betterproto.Message): - """ - The following descriptor entry is appended to the descriptor and is - populated using the trusted address from :ref:`x-forwarded-for - `: .. code-block:: cpp - ("remote_address", "") - """ - - pass - - -@dataclass(eq=False, repr=False) -class RateLimitConfigActionGenericKey(betterproto.Message): - """ - The following descriptor entry is appended to the descriptor: .. code- - block:: cpp ("generic_key", "") - """ - - # The value to use in the descriptor entry. - descriptor_value: str = betterproto.string_field(1) - # An optional key to use in the descriptor entry. If not set it defaults to - # 'generic_key' as the descriptor key. - descriptor_key: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class RateLimitConfigActionHeaderValueMatch(betterproto.Message): - """ - The following descriptor entry is appended to the descriptor: .. 
code- - block:: cpp ("header_match", "") - """ - - # The value to use in the descriptor entry. - descriptor_value: str = betterproto.string_field(1) - # If set to true, the action will append a descriptor entry when the request - # matches the headers. If set to false, the action will append a descriptor - # entry when the request does not match the headers. The default value is - # true. - expect_match: bool = betterproto.bool_field(2) - # Specifies a set of headers that the rate limit action should match on. The - # action will check the request’s headers against all the specified headers - # in the config. A match will happen if all the headers in the config are - # present in the request with the same values (or based on presence if the - # value field is not in the config). - headers: List["_____config_route_v3__.HeaderMatcher"] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class RateLimitConfigActionMetaData(betterproto.Message): - """ - The following descriptor entry is appended when the metadata contains a key - value: .. code-block:: cpp ("", - "") - """ - - # The key to use in the descriptor entry. - descriptor_key: str = betterproto.string_field(1) - # Metadata struct that defines the key and path to retrieve the string value. - # A match will only happen if the value in the metadata is of type string. - metadata_key: "_____type_metadata_v3__.MetadataKey" = betterproto.message_field(2) - # An optional value to use if *metadata_key* is empty. If not set and no - # value is present under the metadata_key then no descriptor is generated. - default_value: str = betterproto.string_field(3) - # Source of metadata - source: "RateLimitConfigActionMetaDataSource" = betterproto.enum_field(4) - - -@dataclass(eq=False, repr=False) -class RateLimitConfigOverride(betterproto.Message): - # Limit override from dynamic metadata. - dynamic_metadata: "RateLimitConfigOverrideDynamicMetadata" = ( - betterproto.message_field(1, group="override_specifier") - ) - - -@dataclass(eq=False, repr=False) -class RateLimitConfigOverrideDynamicMetadata(betterproto.Message): - """Fetches the override from the dynamic metadata.""" - - # Metadata struct that defines the key and path to retrieve the struct value. - # The value must be a struct containing an integer "requests_per_unit" - # property and a "unit" property with a value parseable to - # :ref:`RateLimitUnit enum ` - metadata_key: "_____type_metadata_v3__.MetadataKey" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class RateLimitPerRoute(betterproto.Message): - # Specifies if the rate limit filter should include the virtual host rate - # limits. - vh_rate_limits: "RateLimitPerRouteVhRateLimitsOptions" = betterproto.enum_field(1) - # Specifies if the rate limit filter should include the lower levels (route - # level, virtual host level or cluster weight level) rate limits override - # options. [#not-implemented-hide:] - override_option: "RateLimitPerRouteOverrideOptions" = betterproto.enum_field(2) - # Rate limit configuration. If not set, uses the :ref:`VirtualHost.rate_limit - # s` or :ref:`Rou - # teAction.rate_limits` fields instead. 
[#not-implemented-hide:] - rate_limits: List["RateLimitConfig"] = betterproto.message_field(3) - - -from ......config.core import v3 as _____config_core_v3__ -from ......config.ratelimit import v3 as _____config_ratelimit_v3__ -from ......config.route import v3 as _____config_route_v3__ -from ......type.metadata import v3 as _____type_metadata_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/rbac/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/rbac/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/rbac/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/rbac/v3/__init__.py deleted file mode 100644 index 0a996c9..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/rbac/v3/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/rbac/v3/rbac.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Rbac(betterproto.Message): - """RBAC filter config.""" - - # Specify the RBAC rules to be applied globally. If absent, no enforcing RBAC - # policy will be applied. If present and empty, DENY. - rules: "_____config_rbac_v3__.Rbac" = betterproto.message_field(1) - # Shadow rules are not enforced by the filter (i.e., returning a 403) but - # will emit stats and logs and can be used for rule testing. If absent, no - # shadow RBAC policy will be applied. - shadow_rules: "_____config_rbac_v3__.Rbac" = betterproto.message_field(2) - # If specified, shadow rules will emit stats with the given prefix. This is - # useful to distinguish the stat when there are more than 1 RBAC filter - # configured with shadow rules. - shadow_rules_stat_prefix: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class RbacPerRoute(betterproto.Message): - # Override the global configuration of the filter with this new config. If - # absent, the global RBAC policy will be disabled for this route. - rbac: "Rbac" = betterproto.message_field(2) - - -from ......config.rbac import v3 as _____config_rbac_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/router/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/router/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/router/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/router/v3/__init__.py deleted file mode 100644 index ce02bc9..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/router/v3/__init__.py +++ /dev/null @@ -1,62 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/router/v3/router.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Router(betterproto.Message): - """[#next-free-field: 8]""" - - # Whether the router generates dynamic cluster statistics. Defaults to true. - # Can be disabled in high performance scenarios. - dynamic_stats: Optional[bool] = betterproto.message_field( - 1, wraps=betterproto.TYPE_BOOL - ) - # Whether to start a child span for egress routed calls. 
This can be useful - # in scenarios where other filters (auth, ratelimit, etc.) make outbound - # calls and have child spans rooted at the same ingress parent. Defaults to - # false. - start_child_span: bool = betterproto.bool_field(2) - # Configuration for HTTP upstream logs emitted by the router. Upstream logs - # are configured in the same way as access logs, but each log entry - # represents an upstream request. Presuming retries are configured, multiple - # upstream requests may be made for each downstream (inbound) request. - upstream_log: List[ - "_____config_accesslog_v3__.AccessLog" - ] = betterproto.message_field(3) - # Do not add any additional *x-envoy-* headers to requests or responses. This - # only affects the :ref:`router filter generated *x-envoy-* headers - # `, other Envoy filters and the HTTP - # connection manager may continue to set *x-envoy-* headers. - suppress_envoy_headers: bool = betterproto.bool_field(4) - # Specifies a list of HTTP headers to strictly validate. Envoy will reject a - # request and respond with HTTP status 400 if the request contains an invalid - # value for any of the headers listed in this field. Strict header checking - # is only supported for the following headers: Value must be a ','-delimited - # list (i.e. no spaces) of supported retry policy values: * - # :ref:`config_http_filters_router_x-envoy-retry-grpc-on` * - # :ref:`config_http_filters_router_x-envoy-retry-on` Value must be an - # integer: * :ref:`config_http_filters_router_x-envoy-max-retries` * - # :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` * - # :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` - strict_check_headers: List[str] = betterproto.string_field(5) - # If not set, ingress Envoy will ignore :ref:`config_http_filters_router_x- - # envoy-expected-rq-timeout-ms` header, populated by egress Envoy, when - # deriving timeout for upstream cluster. - respect_expected_rq_timeout: bool = betterproto.bool_field(6) - # If set, Envoy will avoid incrementing HTTP failure code stats on gRPC - # requests. This includes the individual status code value (e.g. - # upstream_rq_504) and group stats (e.g. upstream_rq_5xx). This field is - # useful if interested in relying only on the gRPC stats filter to define - # success and failure metrics for gRPC requests as not all failed gRPC - # requests charge HTTP status code metrics. See :ref:`gRPC stats - # filter` documentation for more details. - suppress_grpc_request_failure_code_stats: bool = betterproto.bool_field(7) - - -from ......config.accesslog import v3 as _____config_accesslog_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/set_metadata/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/set_metadata/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/set_metadata/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/set_metadata/v3/__init__.py deleted file mode 100644 index cbfbf0d..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/set_metadata/v3/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/extensions/filters/http/set_metadata/v3/set_metadata.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Config(betterproto.Message): - # The metadata namespace. - metadata_namespace: str = betterproto.string_field(1) - # The value to update the namespace with. See :ref:`the filter documentation - # ` for more information on how this value - # is merged with potentially existing ones. - value: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(2) - - -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/stateful_session/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/stateful_session/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/stateful_session/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/stateful_session/v3/__init__.py deleted file mode 100644 index e6749e7..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/stateful_session/v3/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/stateful_session/v3/stateful_session.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class StatefulSession(betterproto.Message): - # Specific implementation of session state. This session state will be used - # to store and get address of the upstream host to which the session is - # assigned. [#extension-category: envoy.http.stateful_session] - session_state: "_____config_core_v3__.TypedExtensionConfig" = ( - betterproto.message_field(1) - ) - - -@dataclass(eq=False, repr=False) -class StatefulSessionPerRoute(betterproto.Message): - # Disable the stateful session filter for this particular vhost or route. If - # disabled is specified in multiple per-filter-configs, the most specific one - # will be used. - disabled: bool = betterproto.bool_field(1, group="override") - # Per-route stateful session configuration that can be served by RDS or - # static route table. - stateful_session: "StatefulSession" = betterproto.message_field(2, group="override") - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/tap/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/tap/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/tap/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/tap/v3/__init__.py deleted file mode 100644 index a30b13f..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/tap/v3/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/tap/v3/tap.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Tap(betterproto.Message): - """Top level configuration for the tap filter.""" - - # Common configuration for the HTTP tap filter. 
- common_config: "____common_tap_v3__.CommonExtensionConfig" = ( - betterproto.message_field(1) - ) - - -from .....common.tap import v3 as ____common_tap_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/wasm/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/wasm/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/http/wasm/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/http/wasm/v3/__init__.py deleted file mode 100644 index 7479ac3..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/http/wasm/v3/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/http/wasm/v3/wasm.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Wasm(betterproto.Message): - # General Plugin configuration. - config: "____wasm_v3__.PluginConfig" = betterproto.message_field(1) - - -from .....wasm import v3 as ____wasm_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/listener/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/listener/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/listener/http_inspector/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/listener/http_inspector/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/listener/http_inspector/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/listener/http_inspector/v3/__init__.py deleted file mode 100644 index 3c80f45..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/listener/http_inspector/v3/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class HttpInspector(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/extensions/filters/listener/original_dst/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/listener/original_dst/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/listener/original_dst/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/listener/original_dst/v3/__init__.py deleted file mode 100644 index 4b80c7b..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/listener/original_dst/v3/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/extensions/filters/listener/original_dst/v3/original_dst.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class OriginalDst(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/extensions/filters/listener/original_src/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/listener/original_src/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/listener/original_src/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/listener/original_src/v3/__init__.py deleted file mode 100644 index e946daf..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/listener/original_src/v3/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/listener/original_src/v3/original_src.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class OriginalSrc(betterproto.Message): - """ - The Original Src filter binds upstream connections to the original source - address determined for the connection. This address could come from - something like the Proxy Protocol filter, or it could come from trusted - http headers. - """ - - # Whether to bind the port to the one used in the original downstream - # connection. [#not-implemented-hide:] - bind_port: bool = betterproto.bool_field(1) - # Sets the SO_MARK option on the upstream connection's socket to the provided - # value. Used to ensure that non-local addresses may be routed back through - # envoy when binding to the original source address. The option will not be - # applied if the mark is 0. - mark: int = betterproto.uint32_field(2) diff --git a/src/envoy_data_plane/envoy/extensions/filters/listener/proxy_protocol/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/listener/proxy_protocol/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/listener/proxy_protocol/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/listener/proxy_protocol/v3/__init__.py deleted file mode 100644 index 908ce5d..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/listener/proxy_protocol/v3/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ProxyProtocol(betterproto.Message): - # The list of rules to apply to requests. - rules: List["ProxyProtocolRule"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ProxyProtocolKeyValuePair(betterproto.Message): - # The namespace — if this is empty, the filter's namespace will be used. - metadata_namespace: str = betterproto.string_field(1) - # The key to use within the namespace. - key: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class ProxyProtocolRule(betterproto.Message): - """ - A Rule defines what metadata to apply when a header is present or missing. 
- """ - - # The type that triggers the rule - required TLV type is defined as uint8_t - # in proxy protocol. See `the spec - # `_ for - # details. - tlv_type: int = betterproto.uint32_field(1) - # If the TLV type is present, apply this metadata KeyValuePair. - on_tlv_present: "ProxyProtocolKeyValuePair" = betterproto.message_field(2) diff --git a/src/envoy_data_plane/envoy/extensions/filters/listener/tls_inspector/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/listener/tls_inspector/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/listener/tls_inspector/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/listener/tls_inspector/v3/__init__.py deleted file mode 100644 index 03b7e67..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/listener/tls_inspector/v3/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class TlsInspector(betterproto.Message): - # Populate `JA3` fingerprint hash using data from the TLS Client Hello - # packet. Default is false. - enable_ja3_fingerprinting: Optional[bool] = betterproto.message_field( - 1, wraps=betterproto.TYPE_BOOL - ) diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/client_ssl_auth/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/client_ssl_auth/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/client_ssl_auth/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/client_ssl_auth/v3/__init__.py deleted file mode 100644 index 4e83d7a..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/client_ssl_auth/v3/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ClientSslAuth(betterproto.Message): - # The :ref:`cluster manager ` cluster that - # runs the authentication service. The filter will connect to the service - # every 60s to fetch the list of principals. The service must support the - # expected :ref:`REST API `. - auth_api_cluster: str = betterproto.string_field(1) - # The prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(2) - # Time in milliseconds between principal refreshes from the authentication - # service. Default is 60000 (60s). The actual fetch time will be this value - # plus a random jittered value between 0-refresh_delay_ms milliseconds. - refresh_delay: timedelta = betterproto.message_field(3) - # An optional list of IP address and subnet masks that should be white listed - # for access by the filter. 
If no list is provided, there is no IP allowlist. - ip_white_list: List["_____config_core_v3__.CidrRange"] = betterproto.message_field( - 4 - ) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/connection_limit/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/connection_limit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/connection_limit/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/connection_limit/v3/__init__.py deleted file mode 100644 index c596d64..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/connection_limit/v3/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/connection_limit/v3/connection_limit.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ConnectionLimit(betterproto.Message): - # The prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(1) - # The max connections configuration to use for new incoming connections that - # are processed by the filter's filter chain. When max_connection is reached, - # the incoming connection will be closed after delay duration. - max_connections: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT64 - ) - # The delay configuration to use for rejecting the connection after some - # specified time duration instead of immediately rejecting the connection. - # That way, a malicious user is not able to retry as fast as possible which - # provides a better DoS protection for Envoy. If this is not present, the - # connection will be closed immediately. - delay: timedelta = betterproto.message_field(3) - # Runtime flag that controls whether the filter is enabled or not. If not - # specified, defaults to enabled. - runtime_enabled: "_____config_core_v3__.RuntimeFeatureFlag" = ( - betterproto.message_field(4) - ) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/direct_response/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/direct_response/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/direct_response/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/direct_response/v3/__init__.py deleted file mode 100644 index 1c6a661..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/direct_response/v3/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/direct_response/v3/config.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Config(betterproto.Message): - # Response data as a data source. 
- response: "_____config_core_v3__.DataSource" = betterproto.message_field(1) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/dubbo_proxy/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/dubbo_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/dubbo_proxy/router/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/dubbo_proxy/router/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/dubbo_proxy/router/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/dubbo_proxy/router/v3/__init__.py deleted file mode 100644 index 59c3f2e..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/dubbo_proxy/router/v3/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Router(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/dubbo_proxy/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/dubbo_proxy/v3/__init__.py deleted file mode 100644 index 0dab9ac..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/dubbo_proxy/v3/__init__.py +++ /dev/null @@ -1,150 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto, envoy/extensions/filters/network/dubbo_proxy/v3/route.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Dict, List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class ProtocolType(betterproto.Enum): - """Dubbo Protocol types supported by Envoy.""" - - # the default protocol. - Dubbo = 0 - - -class SerializationType(betterproto.Enum): - """Dubbo Serialization types supported by Envoy.""" - - # the default serialization protocol. - Hessian2 = 0 - - -@dataclass(eq=False, repr=False) -class RouteConfiguration(betterproto.Message): - """[#next-free-field: 6]""" - - # The name of the route configuration. Reserved for future use in - # asynchronous route discovery. - name: str = betterproto.string_field(1) - # The interface name of the service. Wildcard interface are supported in the - # suffix or prefix form. e.g. ``*.methods.add`` will match - # ``com.dev.methods.add``, ``com.prod.methods.add``, etc. - # ``com.dev.methods.*`` will match ``com.dev.methods.add``, - # ``com.dev.methods.update``, etc. Special wildcard ``*`` matching any - # interface. .. note:: The wildcard will not match the empty string. e.g. - # ``*.methods.add`` will match ``com.dev.methods.add`` but not - # ``.methods.add``. - interface: str = betterproto.string_field(2) - # Which group does the interface belong to. - group: str = betterproto.string_field(3) - # The version number of the interface. - version: str = betterproto.string_field(4) - # The list of routes that will be matched, in order, against incoming - # requests. The first route that matches will be used. 
- routes: List["Route"] = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class Route(betterproto.Message): - # Route matching parameters. - match: "RouteMatch" = betterproto.message_field(1) - # Route request to some upstream cluster. - route: "RouteAction" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class RouteMatch(betterproto.Message): - # Method level routing matching. - method: "MethodMatch" = betterproto.message_field(1) - # Specifies a set of headers that the route should match on. The router will - # check the request’s headers against all the specified headers in the route - # config. A match will happen if all the headers in the route are present in - # the request with the same values (or based on presence if the value field - # is not in the config). - headers: List["_____config_route_v3__.HeaderMatcher"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class RouteAction(betterproto.Message): - # Indicates the upstream cluster to which the request should be routed. - cluster: str = betterproto.string_field(1, group="cluster_specifier") - # Multiple upstream clusters can be specified for a given route. The request - # is routed to one of the upstream clusters based on weights assigned to each - # cluster. Currently ClusterWeight only supports the name and weight fields. - weighted_clusters: "_____config_route_v3__.WeightedCluster" = ( - betterproto.message_field(2, group="cluster_specifier") - ) - - -@dataclass(eq=False, repr=False) -class MethodMatch(betterproto.Message): - # The name of the method. - name: "_____type_matcher_v3__.StringMatcher" = betterproto.message_field(1) - # Method parameter definition. The key is the parameter index, starting from - # 0. The value is the parameter matching type. - params_match: Dict[ - int, "MethodMatchParameterMatchSpecifier" - ] = betterproto.map_field(2, betterproto.TYPE_UINT32, betterproto.TYPE_MESSAGE) - - -@dataclass(eq=False, repr=False) -class MethodMatchParameterMatchSpecifier(betterproto.Message): - """The parameter matching type.""" - - # If specified, header match will be performed based on the value of the - # header. - exact_match: str = betterproto.string_field(3, group="parameter_match_specifier") - # If specified, header match will be performed based on range. The rule will - # match if the request header value is within this range. The entire request - # header value must represent an integer in base 10 notation: consisting of - # an optional plus or minus sign followed by a sequence of digits. The rule - # will not match if the header value does not represent an integer. Match - # will fail for empty values, floating point numbers or if only a subsequence - # of the header value is an integer. Examples: * For range [-10,0), route - # will match for header value -1, but not for 0, "somestring", 10.9, - # "-1somestring" - range_match: "_____type_v3__.Int64Range" = betterproto.message_field( - 4, group="parameter_match_specifier" - ) - - -@dataclass(eq=False, repr=False) -class DubboProxy(betterproto.Message): - """[#next-free-field: 6]""" - - # The human readable prefix to use when emitting statistics. - stat_prefix: str = betterproto.string_field(1) - # Configure the protocol used. - protocol_type: "ProtocolType" = betterproto.enum_field(2) - # Configure the serialization protocol used. - serialization_type: "SerializationType" = betterproto.enum_field(3) - # The route table for the connection manager is static and is specified in - # this property. 
- route_config: List["RouteConfiguration"] = betterproto.message_field(4) - # A list of individual Dubbo filters that make up the filter chain for - # requests made to the Dubbo proxy. Order matters as the filters are - # processed sequentially. For backwards compatibility, if no dubbo_filters - # are specified, a default Dubbo router filter (`envoy.filters.dubbo.router`) - # is used. - dubbo_filters: List["DubboFilter"] = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class DubboFilter(betterproto.Message): - """DubboFilter configures a Dubbo filter.""" - - # The name of the filter to instantiate. The name must match a supported - # filter. - name: str = betterproto.string_field(1) - # Filter specific configuration which depends on the filter being - # instantiated. See the supported filters for further documentation. - config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - - -from ......config.route import v3 as _____config_route_v3__ -from ......type import v3 as _____type_v3__ -from ......type.matcher import v3 as _____type_matcher_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/echo/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/echo/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/echo/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/echo/v3/__init__.py deleted file mode 100644 index c5e48ca..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/echo/v3/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/echo/v3/echo.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Echo(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/ext_authz/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/ext_authz/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/ext_authz/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/ext_authz/v3/__init__.py deleted file mode 100644 index 6a2bd2a..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/ext_authz/v3/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ExtAuthz(betterproto.Message): - """ - External Authorization filter calls out to an external service over the - gRPC Authorization API defined by :ref:`CheckRequest - `. A failed check will cause - this filter to close the TCP connection. [#next-free-field: 8] - """ - - # The prefix to use when emitting statistics. - stat_prefix: str = betterproto.string_field(1) - # The external authorization gRPC service configuration. The default timeout - # is set to 200ms by this filter. 
- grpc_service: "_____config_core_v3__.GrpcService" = betterproto.message_field(2) - # The filter's behaviour in case the external authorization service does not - # respond back. When it is set to true, Envoy will also allow traffic in case - # of communication failure between authorization service and the proxy. - # Defaults to false. - failure_mode_allow: bool = betterproto.bool_field(3) - # Specifies if the peer certificate is sent to the external service. When - # this field is true, Envoy will include the peer X.509 certificate, if - # available, in the :ref:`certificate`. - include_peer_certificate: bool = betterproto.bool_field(4) - # API version for ext_authz transport protocol. This describes the ext_authz - # gRPC endpoint and version of Check{Request,Response} used on the wire. - transport_api_version: "_____config_core_v3__.ApiVersion" = betterproto.enum_field( - 5 - ) - # Specifies if the filter is enabled with metadata matcher. If this field is - # not specified, the filter will be enabled for all requests. - filter_enabled_metadata: "_____type_matcher_v3__.MetadataMatcher" = ( - betterproto.message_field(6) - ) - # Optional labels that will be passed to :ref:`labels` in :ref:`destination`. The labels will be - # read from :ref:`metadata` with the - # specified key. - bootstrap_metadata_labels_key: str = betterproto.string_field(7) - - -from ......config.core import v3 as _____config_core_v3__ -from ......type.matcher import v3 as _____type_matcher_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/http_connection_manager/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/http_connection_manager/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/http_connection_manager/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/http_connection_manager/v3/__init__.py deleted file mode 100644 index 7612437..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/http_connection_manager/v3/__init__.py +++ /dev/null @@ -1,873 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class HttpConnectionManagerCodecType(betterproto.Enum): - AUTO = 0 - HTTP1 = 1 - HTTP2 = 2 - HTTP3 = 3 - - -class HttpConnectionManagerServerHeaderTransformation(betterproto.Enum): - OVERWRITE = 0 - APPEND_IF_ABSENT = 1 - PASS_THROUGH = 2 - - -class HttpConnectionManagerForwardClientCertDetails(betterproto.Enum): - SANITIZE = 0 - FORWARD_ONLY = 1 - APPEND_FORWARD = 2 - SANITIZE_SET = 3 - ALWAYS_FORWARD_ONLY = 4 - - -class HttpConnectionManagerPathWithEscapedSlashesAction(betterproto.Enum): - IMPLEMENTATION_SPECIFIC_DEFAULT = 0 - KEEP_UNCHANGED = 1 - REJECT_REQUEST = 2 - UNESCAPE_AND_REDIRECT = 3 - UNESCAPE_AND_FORWARD = 4 - - -class HttpConnectionManagerTracingOperationName(betterproto.Enum): - INGRESS = 0 - EGRESS = 1 - - -@dataclass(eq=False, repr=False) -class HttpConnectionManager(betterproto.Message): - """[#next-free-field: 49]""" - - # Supplies the type of codec that the connection manager should use. 
- codec_type: "HttpConnectionManagerCodecType" = betterproto.enum_field(1) - # The human readable prefix to use when emitting statistics for the - # connection manager. See the :ref:`statistics documentation - # ` for more information. - stat_prefix: str = betterproto.string_field(2) - # The connection manager’s route table will be dynamically loaded via the RDS - # API. - rds: "Rds" = betterproto.message_field(3, group="route_specifier") - # The route table for the connection manager is static and is specified in - # this property. - route_config: "_____config_route_v3__.RouteConfiguration" = ( - betterproto.message_field(4, group="route_specifier") - ) - # A route table will be dynamically assigned to each request based on request - # attributes (e.g., the value of a header). The "routing scopes" (i.e., route - # tables) and "scope keys" are specified in this message. - scoped_routes: "ScopedRoutes" = betterproto.message_field( - 31, group="route_specifier" - ) - # A list of individual HTTP filters that make up the filter chain for - # requests made to the connection manager. :ref:`Order matters - # ` as the filters are processed - # sequentially as request events happen. - http_filters: List["HttpFilter"] = betterproto.message_field(5) - # Whether the connection manager manipulates the - # :ref:`config_http_conn_man_headers_user-agent` and - # :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See - # the linked documentation for more information. Defaults to false. - add_user_agent: Optional[bool] = betterproto.message_field( - 6, wraps=betterproto.TYPE_BOOL - ) - # Presence of the object defines whether the connection manager emits - # :ref:`tracing ` data to the :ref:`configured tracing - # provider `. - tracing: "HttpConnectionManagerTracing" = betterproto.message_field(7) - # Additional settings for HTTP requests handled by the connection manager. - # These will be applicable to both HTTP1 and HTTP2 requests. - common_http_protocol_options: "_____config_core_v3__.HttpProtocolOptions" = ( - betterproto.message_field(35) - ) - # Additional HTTP/1 settings that are passed to the HTTP/1 codec. - http_protocol_options: "_____config_core_v3__.Http1ProtocolOptions" = ( - betterproto.message_field(8) - ) - # Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - http2_protocol_options: "_____config_core_v3__.Http2ProtocolOptions" = ( - betterproto.message_field(9) - ) - # Additional HTTP/3 settings that are passed directly to the HTTP/3 codec. - # [#not-implemented-hide:] - http3_protocol_options: "_____config_core_v3__.Http3ProtocolOptions" = ( - betterproto.message_field(44) - ) - # An optional override that the connection manager will write to the server - # header in responses. If not set, the default is *envoy*. - server_name: str = betterproto.string_field(10) - # Defines the action to be applied to the Server header on the response path. - # By default, Envoy will overwrite the header with the value specified in - # server_name. - server_header_transformation: "HttpConnectionManagerServerHeaderTransformation" = ( - betterproto.enum_field(34) - ) - # Allows for explicit transformation of the :scheme header on the request - # path. If not set, Envoy's default :ref:`scheme - # ` handling applies. - scheme_header_transformation: "_____config_core_v3__.SchemeHeaderTransformation" = ( - betterproto.message_field(48) - ) - # The maximum request headers size for incoming connections. 
If unconfigured, - # the default max request headers allowed is 60 KiB. Requests that exceed - # this limit will receive a 431 response. - max_request_headers_kb: Optional[int] = betterproto.message_field( - 29, wraps=betterproto.TYPE_UINT32 - ) - # The stream idle timeout for connections managed by the connection manager. - # If not specified, this defaults to 5 minutes. The default value was - # selected so as not to interfere with any smaller configured timeouts that - # may have existed in configurations prior to the introduction of this - # feature, while introducing robustness to TCP connections that terminate - # without a FIN. This idle timeout applies to new streams and is overridable - # by the :ref:`route-level idle_timeout - # `. Even on a - # stream in which the override applies, prior to receipt of the initial - # request headers, the :ref:`stream_idle_timeout ` applies. Each time an encode/decode event for headers or - # data is processed for the stream, the timer will be reset. If the timeout - # fires, the stream is terminated with a 408 Request Timeout error code if no - # upstream response header has been received, otherwise a stream reset - # occurs. This timeout also specifies the amount of time that Envoy will wait - # for the peer to open enough window to write any remaining stream data once - # the entirety of stream data (local end stream is true) has been buffered - # pending available window. In other words, this timeout defends against a - # peer that does not release enough window to completely write the stream, - # even though all data has been proxied within available flow control - # windows. If the timeout is hit in this case, the :ref:`tx_flush_timeout - # ` counter will be incremented. Note - # that :ref:`max_stream_duration ` does not apply to this corner case. If the - # :ref:`overload action ` - # "envoy.overload_actions.reduce_timeouts" is configured, this timeout is - # scaled according to the value for :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. Note that it is possible to idle timeout - # even if the wire traffic for a stream is non-idle, due to the granularity - # of events presented to the connection manager. For example, while receiving - # very large request headers, it may be the case that there is traffic - # regularly arriving on the wire while the connection manage is only able to - # observe the end-of-headers event, hence the stream may still idle timeout. - # A value of 0 will completely disable the connection manager stream idle - # timeout, although per-route idle timeout overrides will continue to apply. - stream_idle_timeout: timedelta = betterproto.message_field(24) - # The amount of time that Envoy will wait for the entire request to be - # received. The timer is activated when the request is initiated, and is - # disarmed when the last byte of the request is sent upstream (i.e. all - # decoding filters have processed the request), OR when the response is - # initiated. If not specified or set to 0, this timeout is disabled. - request_timeout: timedelta = betterproto.message_field(28) - # The amount of time that Envoy will wait for the request headers to be - # received. The timer is activated when the first byte of the headers is - # received, and is disarmed when the last byte of the headers has been - # received. If not specified or set to 0, this timeout is disabled. 
- request_headers_timeout: timedelta = betterproto.message_field(41) - # The time that Envoy will wait between sending an HTTP/2 “shutdown - # notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. - # This is used so that Envoy provides a grace period for new streams that - # race with the final GOAWAY frame. During this grace period, Envoy will - # continue to accept new streams. After the grace period, a final GOAWAY - # frame is sent and Envoy will start refusing new streams. Draining occurs - # both when a connection hits the idle timeout or during general server - # draining. The default grace period is 5000 milliseconds (5 seconds) if this - # option is not specified. - drain_timeout: timedelta = betterproto.message_field(12) - # The delayed close timeout is for downstream connections managed by the HTTP - # connection manager. It is defined as a grace period after connection close - # processing has been locally initiated during which Envoy will wait for the - # peer to close (i.e., a TCP FIN/RST is received by Envoy from the downstream - # connection) prior to Envoy closing the socket associated with that - # connection. NOTE: This timeout is enforced even when the socket associated - # with the downstream connection is pending a flush of the write buffer. - # However, any progress made writing data to the socket will restart the - # timer associated with this timeout. This means that the total grace period - # for a socket in this state will be - # +. - # Delaying Envoy's connection close and giving the peer the opportunity to - # initiate the close sequence mitigates a race condition that exists when - # downstream clients do not drain/process data in a connection's receive - # buffer after a remote close has been detected via a socket write(). This - # race leads to such clients failing to process the response code sent by - # Envoy, which could result in erroneous downstream processing. If the - # timeout triggers, Envoy will close the connection's socket. The default - # timeout is 1000 ms if this option is not specified. .. NOTE:: To be - # useful in avoiding the race condition described above, this timeout must be - # set to *at least* +<100ms to account for a reasonable "worst" case processing time - # for a full iteration of Envoy's event loop>. .. WARNING:: A value of 0 - # will completely disable delayed close processing. When disabled, the - # downstream connection's socket will be closed immediately after the - # write flush is completed or will never close if the write flush does not - # complete. - delayed_close_timeout: timedelta = betterproto.message_field(26) - # Configuration for :ref:`HTTP access logs ` - # emitted by the connection manager. - access_log: List[ - "_____config_accesslog_v3__.AccessLog" - ] = betterproto.message_field(13) - # If set to true, the connection manager will use the real remote address of - # the client connection when determining internal versus external origin and - # manipulating various headers. If set to false or absent, the connection - # manager will use the :ref:`config_http_conn_man_headers_x-forwarded-for` - # HTTP header. See the documentation for - # :ref:`config_http_conn_man_headers_x-forwarded-for`, - # :ref:`config_http_conn_man_headers_x-envoy-internal`, and - # :ref:`config_http_conn_man_headers_x-envoy-external-address` for more - # information. 
- use_remote_address: Optional[bool] = betterproto.message_field( - 14, wraps=betterproto.TYPE_BOOL - ) - # The number of additional ingress proxy hops from the right side of the - # :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust - # when determining the origin client's IP address. The default is zero if - # this option is not specified. See the documentation for - # :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - xff_num_trusted_hops: int = betterproto.uint32_field(19) - # The configuration for the original IP detection extensions. When configured - # the extensions will be called along with the request headers and - # information about the downstream connection, such as the directly connected - # address. Each extension will then use these parameters to decide the - # request's effective remote address. If an extension fails to detect the - # original IP address and isn't configured to reject the request, the HCM - # will try the remaining extensions until one succeeds or rejects the - # request. If the request isn't rejected nor any extension succeeds, the HCM - # will fallback to using the remote address. .. WARNING:: Extensions - # cannot be used in conjunction with :ref:`use_remote_address ` nor :ref:`xff_num_trusted_hops `. [#extension-category: - # envoy.http.original_ip_detection] - original_ip_detection_extensions: List[ - "_____config_core_v3__.TypedExtensionConfig" - ] = betterproto.message_field(46) - # Configures what network addresses are considered internal for stats and - # header sanitation purposes. If unspecified, only RFC1918 IP addresses will - # be considered internal. See the documentation for - # :ref:`config_http_conn_man_headers_x-envoy-internal` for more information - # about internal/external addresses. - internal_address_config: "HttpConnectionManagerInternalAddressConfig" = ( - betterproto.message_field(25) - ) - # If set, Envoy will not append the remote address to the - # :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may - # be used in conjunction with HTTP filters that explicitly manipulate XFF - # after the HTTP connection manager has mutated the request headers. While - # :ref:`use_remote_address ` will - # also suppress XFF addition, it has consequences for logging and other Envoy - # uses of the remote address, so *skip_xff_append* should be used when only - # an elision of XFF addition is intended. - skip_xff_append: bool = betterproto.bool_field(21) - # Via header value to append to request and response headers. If this is - # empty, no via header will be appended. - via: str = betterproto.string_field(22) - # Whether the connection manager will generate the :ref:`x-request-id - # ` header if it does not exist. - # This defaults to true. Generating a random UUID4 is expensive so in high - # throughput scenarios where this feature is not desired it can be disabled. - generate_request_id: Optional[bool] = betterproto.message_field( - 15, wraps=betterproto.TYPE_BOOL - ) - # Whether the connection manager will keep the :ref:`x-request-id - # ` header if passed for a request - # that is edge (Edge request is the request from external clients to front - # Envoy) and not reset it, which is the current Envoy behaviour. This - # defaults to false. - preserve_external_request_id: bool = betterproto.bool_field(32) - # If set, Envoy will always set :ref:`x-request-id - # ` header in response. 
If this is - # false or not set, the request ID is returned in responses only if tracing - # is forced using :ref:`x-envoy-force-trace ` header. - always_set_request_id_in_response: bool = betterproto.bool_field(37) - # How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client- - # cert` (XFCC) HTTP header. - forward_client_cert_details: "HttpConnectionManagerForwardClientCertDetails" = ( - betterproto.enum_field(16) - ) - # This field is valid only when :ref:`forward_client_cert_details ` is APPEND_FORWARD or SANITIZE_SET - # and the client connection is mTLS. It specifies the fields in the client - # certificate to be forwarded. Note that in the - # :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* - # is always set, and *By* is always set when the client certificate presents - # the URI type Subject Alternative Name value. - set_current_client_cert_details: "HttpConnectionManagerSetCurrentClientCertDetails" = betterproto.message_field( - 17 - ) - # If proxy_100_continue is true, Envoy will proxy incoming "Expect: - # 100-continue" headers upstream, and forward "100 Continue" responses - # downstream. If this is false or not set, Envoy will instead strip the - # "Expect: 100-continue" header, and send a "100 Continue" response itself. - proxy_100_continue: bool = betterproto.bool_field(18) - # If :ref:`use_remote_address ` is - # true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the - # remote address is an IPv4 address, the address will be mapped to IPv6 - # before it is appended to *x-forwarded-for*. This is useful for testing - # compatibility of upstream services that parse the header value. For - # example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 - # Addresses `_ for - # details. This will also affect the :ref:`config_http_conn_man_headers_x- - # envoy-external-address` header. See :ref:`http_connection_manager.represent - # _ipv4_remote_address_as_ipv4_mapped_ipv6 ` for runtime control. [#not- - # implemented-hide:] - represent_ipv4_remote_address_as_ipv4_mapped_ipv6: bool = betterproto.bool_field(20) - upgrade_configs: List[ - "HttpConnectionManagerUpgradeConfig" - ] = betterproto.message_field(23) - # Should paths be normalized according to RFC 3986 before any processing of - # requests by HTTP filters or routing? This affects the upstream *:path* - # header as well. For paths that fail this check, Envoy will respond with 400 - # to paths that are malformed. This defaults to false currently but will - # default true in the future. When not specified, this value may be - # overridden by the runtime variable :ref:`http_connection_manager.normalize_ - # path`. See `Normalization and - # Comparison `_ for details of - # normalization. Note that Envoy does not perform `case normalization - # `_ - normalize_path: Optional[bool] = betterproto.message_field( - 30, wraps=betterproto.TYPE_BOOL - ) - # Determines if adjacent slashes in the path are merged into one before any - # processing of requests by HTTP filters or routing. This affects the - # upstream *:path* header as well. Without setting this option, incoming - # requests with path `//dir///file` will not match against route with - # `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging - # is not part of `HTTP spec `_ and is - # provided for convenience. - merge_slashes: bool = betterproto.bool_field(33) - # Action to take when request URL path contains escaped slash sequences (%2F, - # %2f, %5C and %5c). 
The default value can be overridden by the :ref:`http_co - # nnection_manager.path_with_escaped_slashes_action` runtime variable. The :ref:`http_con - # nection_manager.path_with_escaped_slashes_action_sampling` runtime variable can - # be used to apply the action to a portion of all requests. - path_with_escaped_slashes_action: "HttpConnectionManagerPathWithEscapedSlashesAction" = betterproto.enum_field( - 45 - ) - # The configuration of the request ID extension. This includes operations - # such as generation, validation, and associated tracing operations. If - # empty, the :ref:`UuidRequestIdConfig - # ` - # default extension is used with default parameters. See the documentation - # for that extension for details on what it does. Customizing the - # configuration for the default extension can be achieved by configuring it - # explicitly here. For example, to disable trace reason packing, the - # following configuration can be used: .. validated-code-block:: yaml - # :type-name: envoy.extensions.filters.network.http_connection_manager.v3.Req - # uestIDExtension typed_config: "@type": - # type.googleapis.com/envoy.extensions.request_id.uuid.v3.UuidRequestIdConfig - # pack_trace_reason: false [#extension-category: envoy.request_id] - request_id_extension: "RequestIdExtension" = betterproto.message_field(36) - # The configuration to customize local reply returned by Envoy. It can - # customize status code, body text and response content type. If not - # specified, status code and text body are hard coded in Envoy, the response - # content type is plain text. - local_reply_config: "LocalReplyConfig" = betterproto.message_field(38) - # Determines if the port part should be removed from host/authority header - # before any processing of request by HTTP filters or routing. The port would - # be removed only if it is equal to the - # :ref:`listener's` - # local port. This affects the upstream host header unless the method is - # CONNECT in which case if no filter adds a port the original port will be - # restored before headers are sent upstream. Without setting this option, - # incoming requests with host `example:443` will not match against route with - # :ref:`domains` - # match set to `example`. Defaults to `false`. Note that port removal is not - # part of `HTTP spec `_ and is provided - # for convenience. Only one of `strip_matching_host_port` or - # `strip_any_host_port` can be set. - strip_matching_host_port: bool = betterproto.bool_field(39) - # Determines if the port part should be removed from host/authority header - # before any processing of request by HTTP filters or routing. This affects - # the upstream host header unless the method is CONNECT in which case if no - # filter adds a port the original port will be restored before headers are - # sent upstream. Without setting this option, incoming requests with host - # `example:443` will not match against route with - # :ref:`domains` - # match set to `example`. Defaults to `false`. Note that port removal is not - # part of `HTTP spec `_ and is provided - # for convenience. Only one of `strip_matching_host_port` or - # `strip_any_host_port` can be set. - strip_any_host_port: bool = betterproto.bool_field(42, group="strip_port_mode") - # Governs Envoy's behavior when receiving invalid HTTP from downstream. If - # this option is false (default), Envoy will err on the conservative side - # handling HTTP errors, terminating both HTTP/1.1 and HTTP/2 connections when - # receiving an invalid request. 
If this option is set to true, Envoy will be - # more permissive, only resetting the invalid stream in the case of HTTP/2 - # and leaving the connection open where possible (if the entire request is - # read for HTTP/1.1) In general this should be true for deployments receiving - # trusted traffic (L2 Envoys, company-internal mesh) and false when receiving - # untrusted traffic (edge deployments). If different behaviors for - # invalid_http_message for HTTP/1 and HTTP/2 are desired, one should use the - # new HTTP/1 option :ref:`override_stream_error_on_invalid_http_message ` or the new HTTP/2 option - # :ref:`override_stream_error_on_invalid_http_message ` *not* the deprecated but similarly named - # :ref:`stream_error_on_invalid_http_messaging ` - stream_error_on_invalid_http_message: Optional[bool] = betterproto.message_field( - 40, wraps=betterproto.TYPE_BOOL - ) - # [#not-implemented-hide:] Path normalization configuration. This includes - # configurations for transformations (e.g. RFC 3986 normalization or merge - # adjacent slashes) and the policy to apply them. The policy determines - # whether transformations affect the forwarded *:path* header. RFC 3986 path - # normalization is enabled by default and the default policy is that the - # normalized header will be forwarded. See :ref:`PathNormalizationOptions ` for details. - path_normalization_options: "HttpConnectionManagerPathNormalizationOptions" = ( - betterproto.message_field(43) - ) - # Determines if trailing dot of the host should be removed from - # host/authority header before any processing of request by HTTP filters or - # routing. This affects the upstream host header. Without setting this - # option, incoming requests with host `example.com.` will not match against - # route with - # :ref:`domains` - # match set to `example.com`. Defaults to `false`. When the incoming request - # contains a host/authority header that includes a port number, setting this - # option will strip a trailing dot, if present, from the host section, - # leaving the port as is (e.g. host value `example.com.:443` will be updated - # to `example.com:443`). - strip_trailing_host_dot: bool = betterproto.bool_field(47) - - -@dataclass(eq=False, repr=False) -class HttpConnectionManagerTracing(betterproto.Message): - """[#next-free-field: 10]""" - - # Target percentage of requests managed by this HTTP connection manager that - # will be force traced if the :ref:`x-client-trace-id - # ` header is set. This field - # is a direct analog for the runtime variable 'tracing.client_sampling' in - # the :ref:`HTTP Connection Manager `. Default: - # 100% - client_sampling: "_____type_v3__.Percent" = betterproto.message_field(3) - # Target percentage of requests managed by this HTTP connection manager that - # will be randomly selected for trace generation, if not requested by the - # client or not forced. This field is a direct analog for the runtime - # variable 'tracing.random_sampling' in the :ref:`HTTP Connection Manager - # `. Default: 100% - random_sampling: "_____type_v3__.Percent" = betterproto.message_field(4) - # Target percentage of requests managed by this HTTP connection manager that - # will be traced after all other sampling checks have been applied (client- - # directed, force tracing, random sampling). This field functions as an upper - # limit on the total configured sampling rate. 
For instance, setting - # client_sampling to 100% but overall_sampling to 1% will result in only 1% - # of client requests with the appropriate headers to be force traced. This - # field is a direct analog for the runtime variable 'tracing.global_enabled' - # in the :ref:`HTTP Connection Manager `. - # Default: 100% - overall_sampling: "_____type_v3__.Percent" = betterproto.message_field(5) - # Whether to annotate spans with additional data. If true, spans will include - # logs for stream events. - verbose: bool = betterproto.bool_field(6) - # Maximum length of the request path to extract and include in the HttpUrl - # tag. Used to truncate lengthy request paths to meet the needs of a tracing - # backend. Default: 256 - max_path_tag_length: Optional[int] = betterproto.message_field( - 7, wraps=betterproto.TYPE_UINT32 - ) - # A list of custom tags with unique tag name to create tags for the active - # span. - custom_tags: List["_____type_tracing_v3__.CustomTag"] = betterproto.message_field(8) - # Configuration for an external tracing provider. If not specified, no - # tracing will be performed. .. attention:: Please be aware that - # *envoy.tracers.opencensus* provider can only be configured once in Envoy - # lifetime. Any attempts to reconfigure it or to use different - # configurations for different HCM filters will be rejected. Such a - # constraint is inherent to OpenCensus itself. It cannot be overcome without - # changes on OpenCensus side. - provider: "_____config_trace_v3__.TracingHttp" = betterproto.message_field(9) - - -@dataclass(eq=False, repr=False) -class HttpConnectionManagerInternalAddressConfig(betterproto.Message): - # Whether unix socket addresses should be considered internal. - unix_sockets: bool = betterproto.bool_field(1) - - -@dataclass(eq=False, repr=False) -class HttpConnectionManagerSetCurrentClientCertDetails(betterproto.Message): - """[#next-free-field: 7]""" - - # Whether to forward the subject of the client cert. Defaults to false. - subject: Optional[bool] = betterproto.message_field(1, wraps=betterproto.TYPE_BOOL) - # Whether to forward the entire client cert in URL encoded PEM format. This - # will appear in the XFCC header comma separated from other values with the - # value Cert="PEM". Defaults to false. - cert: bool = betterproto.bool_field(3) - # Whether to forward the entire client cert chain (including the leaf cert) - # in URL encoded PEM format. This will appear in the XFCC header comma - # separated from other values with the value Chain="PEM". Defaults to false. - chain: bool = betterproto.bool_field(6) - # Whether to forward the DNS type Subject Alternative Names of the client - # cert. Defaults to false. - dns: bool = betterproto.bool_field(4) - # Whether to forward the URI type Subject Alternative Name of the client - # cert. Defaults to false. - uri: bool = betterproto.bool_field(5) - - -@dataclass(eq=False, repr=False) -class HttpConnectionManagerUpgradeConfig(betterproto.Message): - """ - The configuration for HTTP upgrades. For each upgrade type desired, an - UpgradeConfig must be added. .. warning:: The current implementation of - upgrade headers does not handle multi-valued upgrade headers. Support - for multi-valued headers may be added in the future if needed. .. - warning:: The current implementation of upgrade headers does not work - with HTTP/2 upstreams. - """ - - # The case-insensitive name of this upgrade, e.g. "websocket". 
For each - # upgrade type present in upgrade_configs, requests with Upgrade: - # [upgrade_type] will be proxied upstream. - upgrade_type: str = betterproto.string_field(1) - # If present, this represents the filter chain which will be created for this - # type of upgrade. If no filters are present, the filter chain for HTTP - # connections will be used for this upgrade type. - filters: List["HttpFilter"] = betterproto.message_field(2) - # Determines if upgrades are enabled or disabled by default. Defaults to - # true. This can be overridden on a per-route basis with :ref:`cluster - # ` as - # documented in the :ref:`upgrade documentation `. - enabled: Optional[bool] = betterproto.message_field(3, wraps=betterproto.TYPE_BOOL) - - -@dataclass(eq=False, repr=False) -class HttpConnectionManagerPathNormalizationOptions(betterproto.Message): - """ - [#not-implemented-hide:] Transformations that apply to path headers. - Transformations are applied before any processing of requests by HTTP - filters, routing, and matching. Only the normalized path will be visible - internally if a transformation is enabled. Any path rewrites that the - router performs (e.g. :ref:`regex_rewrite - ` or - :ref:`prefix_rewrite - `) will - apply to the *:path* header destined for the upstream. Note: access logging - and tracing will show the original *:path* header. - """ - - # [#not-implemented-hide:] Normalization applies internally before any - # processing of requests by HTTP filters, routing, and matching *and* will - # affect the forwarded *:path* header. Defaults to :ref:`NormalizePathRFC3986 - # `. When not specified, this value may be overridden by the runtime - # variable :ref:`http_connection_manager.normalize_path`. Envoy will respond with 400 to paths that are - # malformed (e.g. for paths that fail RFC 3986 normalization due to - # disallowed characters.) - forwarding_transformation: "_____type_http_v3__.PathTransformation" = ( - betterproto.message_field(1) - ) - # [#not-implemented-hide:] Normalization only applies internally before any - # processing of requests by HTTP filters, routing, and matching. These will - # be applied after full transformation is applied. The *:path* header before - # this transformation will be restored in the router filter and sent upstream - # unless it was mutated by a filter. Defaults to no transformations. Multiple - # actions can be applied in the same Transformation, forming a sequential - # pipeline. The transformations will be performed in the order that they - # appear. Envoy will respond with 400 to paths that are malformed (e.g. for - # paths that fail RFC 3986 normalization due to disallowed characters.) - http_filter_transformation: "_____type_http_v3__.PathTransformation" = ( - betterproto.message_field(2) - ) - - -@dataclass(eq=False, repr=False) -class LocalReplyConfig(betterproto.Message): - """The configuration to customize local reply returned by Envoy.""" - - # Configuration of list of mappers which allows to filter and change local - # response. The mappers will be checked by the specified order until one is - # matched. - mappers: List["ResponseMapper"] = betterproto.message_field(1) - # The configuration to form response body from the :ref:`command operators - # ` and to specify response content type - # as one of: plain/text or application/json. Example one: "plain/text" - # ``body_format``. .. 
validated-code-block:: yaml :type-name: - # envoy.config.core.v3.SubstitutionFormatString text_format: - # "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" The following - # response body in "plain/text" format will be generated for a request with - # local reply body of "upstream connection error", response_code=503 and - # path=/foo. .. code-block:: text upstream connect error:503:path=/foo - # Example two: "application/json" ``body_format``. .. validated-code-block:: - # yaml :type-name: envoy.config.core.v3.SubstitutionFormatString - # json_format: status: "%RESPONSE_CODE%" message: - # "%LOCAL_REPLY_BODY%" path: "%REQ(:path)%" The following response body - # in "application/json" format would be generated for a request with local - # reply body of "upstream connection error", response_code=503 and path=/foo. - # .. code-block:: json { "status": 503, "message": "upstream - # connection error", "path": "/foo" } - body_format: "_____config_core_v3__.SubstitutionFormatString" = ( - betterproto.message_field(2) - ) - - -@dataclass(eq=False, repr=False) -class ResponseMapper(betterproto.Message): - """ - The configuration to filter and change local response. [#next-free-field: - 6] - """ - - # Filter to determine if this mapper should apply. - filter: "_____config_accesslog_v3__.AccessLogFilter" = betterproto.message_field(1) - # The new response status code if specified. - status_code: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # The new local reply body text if specified. It will be used in the - # `%LOCAL_REPLY_BODY%` command operator in the `body_format`. - body: "_____config_core_v3__.DataSource" = betterproto.message_field(3) - # A per mapper `body_format` to override the :ref:`body_format `. It will be used when this mapper is matched. - body_format_override: "_____config_core_v3__.SubstitutionFormatString" = ( - betterproto.message_field(4) - ) - # HTTP headers to add to a local reply. This allows the response mapper to - # append, to add or to override headers of any local reply before it is sent - # to a downstream client. - headers_to_add: List[ - "_____config_core_v3__.HeaderValueOption" - ] = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class Rds(betterproto.Message): - # Configuration source specifier for RDS. - config_source: "_____config_core_v3__.ConfigSource" = betterproto.message_field(1) - # The name of the route configuration. This name will be passed to the RDS - # API. This allows an Envoy configuration with multiple HTTP listeners (and - # associated HTTP connection manager filters) to use different route - # configurations. - route_config_name: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class ScopedRouteConfigurationsList(betterproto.Message): - """ - This message is used to work around the limitations with 'oneof' and - repeated fields. - """ - - scoped_route_configurations: List[ - "_____config_route_v3__.ScopedRouteConfiguration" - ] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ScopedRoutes(betterproto.Message): - """[#next-free-field: 6]""" - - # The name assigned to the scoped routing configuration. - name: str = betterproto.string_field(1) - # The algorithm to use for constructing a scope key for each request. - scope_key_builder: "ScopedRoutesScopeKeyBuilder" = betterproto.message_field(2) - # Configuration source specifier for RDS. 
This config source is used to - # subscribe to RouteConfiguration resources specified in - # ScopedRouteConfiguration messages. - rds_config_source: "_____config_core_v3__.ConfigSource" = betterproto.message_field( - 3 - ) - # The set of routing scopes corresponding to the HCM. A scope is assigned to - # a request by matching a key constructed from the request's attributes - # according to the algorithm specified by the :ref:`ScopeKeyBuilder` in this message. - scoped_route_configurations_list: "ScopedRouteConfigurationsList" = ( - betterproto.message_field(4, group="config_specifier") - ) - # The set of routing scopes associated with the HCM will be dynamically - # loaded via the SRDS API. A scope is assigned to a request by matching a key - # constructed from the request's attributes according to the algorithm - # specified by the :ref:`ScopeKeyBuilder` in this - # message. - scoped_rds: "ScopedRds" = betterproto.message_field(5, group="config_specifier") - - -@dataclass(eq=False, repr=False) -class ScopedRoutesScopeKeyBuilder(betterproto.Message): - """ - Specifies the mechanism for constructing "scope keys" based on HTTP request - attributes. These keys are matched against a set of - :ref:`Key` - objects assembled from :ref:`ScopedRouteConfiguration` messages distributed via SRDS (the - Scoped Route Discovery Service) or assigned statically via :ref:`scoped_rou - te_configurations_list`. Upon - receiving a request's headers, the Router will build a key using the - algorithm specified by this message. This key will be used to look up the - routing table (i.e., the :ref:`RouteConfiguration`) to use for the request. - """ - - # The final(built) scope key consists of the ordered union of these - # fragments, which are compared in order with the fragments of a :ref:`Scoped - # RouteConfiguration`. A missing fragment during comparison will make the key invalid, i.e., - # the computed key doesn't match any key. - fragments: List[ - "ScopedRoutesScopeKeyBuilderFragmentBuilder" - ] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ScopedRoutesScopeKeyBuilderFragmentBuilder(betterproto.Message): - """ - Specifies the mechanism for constructing key fragments which are composed - into scope keys. - """ - - # Specifies how a header field's value should be extracted. - header_value_extractor: "ScopedRoutesScopeKeyBuilderFragmentBuilderHeaderValueExtractor" = betterproto.message_field( - 1, group="type" - ) - - -@dataclass(eq=False, repr=False) -class ScopedRoutesScopeKeyBuilderFragmentBuilderHeaderValueExtractor( - betterproto.Message -): - """ - Specifies how the value of a header should be extracted. The following - example maps the structure of a header to the fields in this message. .. - code:: <0> <1> <-- index X-Header: a=b;c=d | - || | | || \----> | || | - |\----> | | | \----> - | \----> Each 'a=b' key-value pair - constitutes an 'element' of the header field. - """ - - # The name of the header field to extract the value from. .. note:: If the - # header appears multiple times only the first value is used. - name: str = betterproto.string_field(1) - # The element separator (e.g., ';' separates 'a;b;c;d'). Default: empty - # string. This causes the entirety of the header field to be extracted. If - # this field is set to an empty string and 'index' is used in the oneof - # below, 'index' must be set to 0. - element_separator: str = betterproto.string_field(2) - # Specifies the zero based index of the element to extract. 
Note Envoy - # concatenates multiple values of the same header key into a comma separated - # string, the splitting always happens after the concatenation. - index: int = betterproto.uint32_field(3, group="extract_type") - # Specifies the key value pair to extract the value from. - element: "ScopedRoutesScopeKeyBuilderFragmentBuilderHeaderValueExtractorKvElement" = betterproto.message_field( - 4, group="extract_type" - ) - - -@dataclass(eq=False, repr=False) -class ScopedRoutesScopeKeyBuilderFragmentBuilderHeaderValueExtractorKvElement( - betterproto.Message -): - """Specifies a header field's key value pair to match on.""" - - # The separator between key and value (e.g., '=' separates 'k=v;...'). If an - # element is an empty string, the element is ignored. If an element contains - # no separator, the whole element is parsed as key and the fragment value is - # an empty string. If there are multiple values for a matched key, the first - # value is returned. - separator: str = betterproto.string_field(1) - # The key to match on. - key: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class ScopedRds(betterproto.Message): - # Configuration source specifier for scoped RDS. - scoped_rds_config_source: "_____config_core_v3__.ConfigSource" = ( - betterproto.message_field(1) - ) - # xdstp:// resource locator for scoped RDS collection. [#not-implemented- - # hide:] - srds_resources_locator: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class HttpFilter(betterproto.Message): - """[#next-free-field: 7]""" - - # The name of the filter configuration. The name is used as a fallback to - # select an extension if the type of the configuration proto is not - # sufficient. It also serves as a resource name in ExtensionConfigDS. - name: str = betterproto.string_field(1) - # Filter specific configuration which depends on the filter being - # instantiated. See the supported filters for further documentation. To - # support configuring a :ref:`match tree `, use - # an :ref:`ExtensionWithMatcher - # ` with - # the desired HTTP filter. [#extension-category: envoy.filters.http] - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 4, group="config_type" - ) - # Configuration source specifier for an extension configuration discovery - # service. In case of a failure and without the default configuration, the - # HTTP listener responds with code 500. Extension configs delivered through - # this mechanism are not expected to require warming (see - # https://github.com/envoyproxy/envoy/issues/12061). To support configuring a - # :ref:`match tree `, use an - # :ref:`ExtensionWithMatcher - # ` with - # the desired HTTP filter. This works for both the default filter - # configuration as well as for filters provided via the API. - config_discovery: "_____config_core_v3__.ExtensionConfigSource" = ( - betterproto.message_field(5, group="config_type") - ) - # If true, clients that do not support this filter may ignore the filter but - # otherwise accept the config. Otherwise, clients that do not support this - # filter must reject the config. This is also same with typed per filter - # config. - is_optional: bool = betterproto.bool_field(6) - - -@dataclass(eq=False, repr=False) -class RequestIdExtension(betterproto.Message): - # Request ID extension specific configuration. 
- typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class EnvoyMobileHttpConnectionManager(betterproto.Message): - """ - [#protodoc-title: Envoy Mobile HTTP connection manager] HTTP connection - manager for use in Envoy mobile. [#extension: - envoy.filters.network.envoy_mobile_http_connection_manager] - """ - - # The configuration for the underlying HttpConnectionManager which will be - # instantiated for Envoy mobile. - config: "HttpConnectionManager" = betterproto.message_field(1) - - -from ......config.accesslog import v3 as _____config_accesslog_v3__ -from ......config.core import v3 as _____config_core_v3__ -from ......config.route import v3 as _____config_route_v3__ -from ......config.trace import v3 as _____config_trace_v3__ -from ......type import v3 as _____type_v3__ -from ......type.http import v3 as _____type_http_v3__ -from ......type.tracing import v3 as _____type_tracing_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/local_ratelimit/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/local_ratelimit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/local_ratelimit/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/local_ratelimit/v3/__init__.py deleted file mode 100644 index dd9bbd2..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/local_ratelimit/v3/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class LocalRateLimit(betterproto.Message): - # The prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(1) - # The token bucket configuration to use for rate limiting connections that - # are processed by the filter's filter chain. Each incoming connection - # processed by the filter consumes a single token. If the token is available, - # the connection will be allowed. If no tokens are available, the connection - # will be immediately closed. .. note:: In the current implementation each - # filter and filter chain has an independent rate limit. .. note:: In the - # current implementation the token bucket's :ref:`fill_interval - # ` must be >= 50ms to - # avoid too aggressive refills. - token_bucket: "_____type_v3__.TokenBucket" = betterproto.message_field(2) - # Runtime flag that controls whether the filter is enabled or not. If not - # specified, defaults to enabled. 
- runtime_enabled: "_____config_core_v3__.RuntimeFeatureFlag" = ( - betterproto.message_field(3) - ) - - -from ......config.core import v3 as _____config_core_v3__ -from ......type import v3 as _____type_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/matcher/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/matcher/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/matcher/action/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/matcher/action/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/matcher/action/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/matcher/action/v3/__init__.py deleted file mode 100644 index 5072882..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/matcher/action/v3/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/meta_protocol_proxy/matcher/action/v3/action.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RouteAction(betterproto.Message): - """Configuration for the route match action. [#not-implemented-hide:]""" - - # Indicates the upstream cluster to which the request should be routed. - cluster: str = betterproto.string_field(1, group="cluster_specifier") - # Multiple upstream clusters can be specified for a given route. The request - # is routed to one of the upstream clusters based on weights assigned to each - # cluster. Currently ClusterWeight only supports the name and weight fields. - weighted_clusters: "_______config_route_v3__.WeightedCluster" = ( - betterproto.message_field(2, group="cluster_specifier") - ) - - -from ........config.route import v3 as _______config_route_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/matcher/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/matcher/v3/__init__.py deleted file mode 100644 index c73af94..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/matcher/v3/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/meta_protocol_proxy/matcher/v3/matcher.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ServiceMatchInput(betterproto.Message): - """ - Used to match request service of the downstream request. Only applicable if - a service provided by the application protocol. [#not-implemented-hide:] - """ - - pass - - -@dataclass(eq=False, repr=False) -class MethodMatchInput(betterproto.Message): - """ - Used to match request method of the downstream request. 
Only applicable if - a method provided by the application protocol. [#not-implemented-hide:] - """ - - pass - - -@dataclass(eq=False, repr=False) -class PropertyMatchInput(betterproto.Message): - """ - Used to match an arbitrary property of the downstream request. These - properties are populated by the codecs of application protocols. [#not- - implemented-hide:] - """ - - # The property name to match on. - property_name: str = betterproto.string_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/v3/__init__.py deleted file mode 100644 index 34df5f9..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/meta_protocol_proxy/v3/__init__.py +++ /dev/null @@ -1,98 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/meta_protocol_proxy/v3/meta_protocol_proxy.proto, envoy/extensions/filters/network/meta_protocol_proxy/v3/route.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RouteConfiguration(betterproto.Message): - """ - [#protodoc-title: Meta Protocol Proxy Route Configuration] The meta - protocol proxy makes use of the `xds matching API` for routing - configurations. In the below example, we combine a top level tree matcher - with a linear matcher to match the incoming requests, and send the matching - requests to v1 of the upstream service. name: demo-v1 route: - matcher_tree: input: name: request-service typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.meta_protocol - _proxy.matcher.v3.ServiceMatchInput exact_match_map: map: - org.apache.dubbo.samples.basic.api.DemoService: matcher: - matcher_list: matchers: - predicate: - and_matcher: predicate: - - single_predicate: input: - name: request-properties typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.meta_protocol - _proxy.matcher.v3.PropertyMatchInput - property_name: version value_match: - exact: v1 - single_predicate: - input: name: request-properties - typed_config: "@type": type.googleapis.com/envo - y.extensions.filters.network.meta_protocol_proxy.matcher.v3.PropertyMatchIn - put property_name: user - value_match: exact: john - on_match: action: name: route - typed_config: "@type": type.googleapis.com/envoy.exte - nsions.filters.network.meta_protocol_proxy.matcher.action.v3.routeAction - cluster: outbound|20880|v1|org.apache.dubbo.samples.basic.api.demoservice - [#not-implemented-hide:] - """ - - # The name of the route configuration. For example, it might match - # route_config_name in - # envoy.extensions.filters.network.meta_protocol_proxy.v3.Rds. - name: str = betterproto.string_field(1) - # The match tree to use when resolving route actions for incoming requests. - route: "______xds_type_matcher_v3__.Matcher" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class MetaProtocolProxy(betterproto.Message): - """[#not-implemented-hide:] [#next-free-field: 6]""" - - # The human readable prefix to use when emitting statistics. - stat_prefix: str = betterproto.string_field(1) - # The application protocol built on top of the meta protocol proxy. 
- application_protocol: "ApplicationProtocol" = betterproto.message_field(2) - # The meta protocol proxies route table will be dynamically loaded via the - # meta RDS API. - rds: "MetaRds" = betterproto.message_field(3, group="route_specifier") - # The route table for the meta protocol proxy is static and is specified in - # this property. - route_config: "RouteConfiguration" = betterproto.message_field( - 4, group="route_specifier" - ) - # A list of individual Layer-7 filters that make up the filter chain for - # requests made to the meta protocol proxy. Order matters as the filters are - # processed sequentially as request events happen. - meta_protocol_filters: List[ - "_____config_core_v3__.TypedExtensionConfig" - ] = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class ApplicationProtocol(betterproto.Message): - """[#not-implemented-hide:]""" - - # The name of the application protocol. - name: str = betterproto.string_field(1) - # The codec which encodes and decodes the application protocol. - codec: "_____config_core_v3__.TypedExtensionConfig" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class MetaRds(betterproto.Message): - """[#not-implemented-hide:]""" - - # Configuration source specifier for RDS. - config_source: "_____config_core_v3__.ConfigSource" = betterproto.message_field(1) - # The name of the route configuration. This name will be passed to the RDS - # API. This allows an Envoy configuration with multiple meta protocol proxies - # to use different route configurations. - route_config_name: str = betterproto.string_field(2) - - -from .......xds.type.matcher import v3 as ______xds_type_matcher_v3__ -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/mongo_proxy/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/mongo_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/mongo_proxy/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/mongo_proxy/v3/__init__.py deleted file mode 100644 index e8afb37..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/mongo_proxy/v3/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class MongoProxy(betterproto.Message): - """[#next-free-field: 6]""" - - # The human readable prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(1) - # The optional path to use for writing Mongo access logs. If not access log - # path is specified no access logs will be written. Note that access log is - # also gated :ref:`runtime `. - access_log: str = betterproto.string_field(2) - # Inject a fixed delay before proxying a Mongo operation. Delays are applied - # to the following MongoDB operations: Query, Insert, GetMore, and - # KillCursors. Once an active delay is in progress, all incoming data up - # until the timer event fires will be a part of the delay. - delay: "___common_fault_v3__.FaultDelay" = betterproto.message_field(3) - # Flag to specify whether :ref:`dynamic metadata - # ` should be emitted. - # Defaults to false. 
- emit_dynamic_metadata: bool = betterproto.bool_field(4) - # List of commands to emit metrics for. Defaults to "delete", "insert", and - # "update". Note that metrics will not be emitted for "find" commands, since - # those are considered queries, and metrics for those are emitted under a - # dedicated "query" namespace. - commands: List[str] = betterproto.string_field(5) - - -from ....common.fault import v3 as ___common_fault_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/ratelimit/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/ratelimit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/ratelimit/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/ratelimit/v3/__init__.py deleted file mode 100644 index 25cf742..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/ratelimit/v3/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RateLimit(betterproto.Message): - """[#next-free-field: 7]""" - - # The prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(1) - # The rate limit domain to use in the rate limit service request. - domain: str = betterproto.string_field(2) - # The rate limit descriptor list to use in the rate limit service request. - descriptors: List[ - "____common_ratelimit_v3__.RateLimitDescriptor" - ] = betterproto.message_field(3) - # The timeout in milliseconds for the rate limit service RPC. If not set, - # this defaults to 20ms. - timeout: timedelta = betterproto.message_field(4) - # The filter's behaviour in case the rate limiting service does not respond - # back. When it is set to true, Envoy will not allow traffic in case of - # communication failure between rate limiting service and the proxy. Defaults - # to false. - failure_mode_deny: bool = betterproto.bool_field(5) - # Configuration for an external rate limit service provider. If not - # specified, any calls to the rate limit service will immediately return - # success. - rate_limit_service: "_____config_ratelimit_v3__.RateLimitServiceConfig" = ( - betterproto.message_field(6) - ) - - -from ......config.ratelimit import v3 as _____config_ratelimit_v3__ -from .....common.ratelimit import v3 as ____common_ratelimit_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/rbac/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/rbac/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/rbac/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/rbac/v3/__init__.py deleted file mode 100644 index 1e895f5..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/rbac/v3/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/extensions/filters/network/rbac/v3/rbac.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class RbacEnforcementType(betterproto.Enum): - ONE_TIME_ON_FIRST_BYTE = 0 - CONTINUOUS = 1 - - -@dataclass(eq=False, repr=False) -class Rbac(betterproto.Message): - """ - RBAC network filter config. Header should not be used in rules/shadow_rules - in RBAC network filter as this information is only available in :ref:`RBAC - http filter `. [#next-free-field: 6] - """ - - # Specify the RBAC rules to be applied globally. If absent, no enforcing RBAC - # policy will be applied. If present and empty, DENY. - rules: "_____config_rbac_v3__.Rbac" = betterproto.message_field(1) - # Shadow rules are not enforced by the filter but will emit stats and logs - # and can be used for rule testing. If absent, no shadow RBAC policy will be - # applied. - shadow_rules: "_____config_rbac_v3__.Rbac" = betterproto.message_field(2) - # If specified, shadow rules will emit stats with the given prefix. This is - # useful to distinguish the stat when there are more than 1 RBAC filter - # configured with shadow rules. - shadow_rules_stat_prefix: str = betterproto.string_field(5) - # The prefix to use when emitting statistics. - stat_prefix: str = betterproto.string_field(3) - # RBAC enforcement strategy. By default RBAC will be enforced only once when - # the first byte of data arrives from the downstream. When used in - # conjunction with filters that emit dynamic metadata after decoding every - # payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to CONTINUOUS - # to enforce RBAC policies on every message boundary. - enforcement_type: "RbacEnforcementType" = betterproto.enum_field(4) - - -from ......config.rbac import v3 as _____config_rbac_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/redis_proxy/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/redis_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/redis_proxy/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/redis_proxy/v3/__init__.py deleted file mode 100644 index 9fc8751..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/redis_proxy/v3/__init__.py +++ /dev/null @@ -1,248 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class RedisProxyConnPoolSettingsReadPolicy(betterproto.Enum): - MASTER = 0 - PREFER_MASTER = 1 - REPLICA = 2 - PREFER_REPLICA = 3 - ANY = 4 - - -class RedisProxyRedisFaultRedisFaultType(betterproto.Enum): - DELAY = 0 - ERROR = 1 - - -@dataclass(eq=False, repr=False) -class RedisProxy(betterproto.Message): - """[#next-free-field: 9]""" - - # The prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(1) - # Network settings for the connection pool to the upstream clusters. - settings: "RedisProxyConnPoolSettings" = betterproto.message_field(3) - # Indicates that latency stat should be computed in microseconds. By default - # it is computed in milliseconds. This does not apply to upstream command - # stats currently. 
- latency_in_micros: bool = betterproto.bool_field(4) - # List of **unique** prefixes used to separate keys from different workloads - # to different clusters. Envoy will always favor the longest match first in - # case of overlap. A catch-all cluster can be used to forward commands when - # there is no match. Time complexity of the lookups are in O(min(longest key - # prefix, key length)). Example: .. code-block:: yaml prefix_routes: - # routes: - prefix: "ab" cluster: "cluster_a" - - # prefix: "abc" cluster: "cluster_b" When using the above routes, - # the following prefixes would be sent to: * ``get abc:users`` would retrieve - # the key 'abc:users' from cluster_b. * ``get ab:users`` would retrieve the - # key 'ab:users' from cluster_a. * ``get z:users`` would return a - # NoUpstreamHost error. A :ref:`catch-all route` - # would have retrieved the key from that cluster instead. See the - # :ref:`configuration section ` of the - # architecture overview for recommendations on configuring the backing - # clusters. - prefix_routes: "RedisProxyPrefixRoutes" = betterproto.message_field(5) - # Authenticate Redis client connections locally by forcing downstream clients - # to issue a `Redis AUTH command `_ with this - # password before enabling any other command. If an AUTH command's password - # matches this password, an "OK" response will be returned to the client. If - # the AUTH command password does not match this password, then an "ERR - # invalid password" error will be returned. If any other command is received - # before AUTH when this password is set, then a "NOAUTH Authentication - # required." error response will be sent to the client. If an AUTH command is - # received when the password is not set, then an "ERR Client sent AUTH, but - # no password is set" error will be returned. - downstream_auth_password: "_____config_core_v3__.DataSource" = ( - betterproto.message_field(6) - ) - # List of faults to inject. Faults currently come in two flavors: - Delay, - # which delays a request. - Error, which responds to a request with an error. - # Errors can also have delays attached. Example: .. code-block:: yaml - # faults: - fault_type: ERROR fault_enabled: default_value: - # numerator: 10 denominator: HUNDRED runtime_key: "bogus_key" - # commands: - GET - fault_type: DELAY fault_enabled: - # default_value: numerator: 10 denominator: HUNDRED - # runtime_key: "bogus_key" delay: 2s See the :ref:`fault injection - # section ` for more - # information on how to configure this. - faults: List["RedisProxyRedisFault"] = betterproto.message_field(8) - # If a username is provided an ACL style AUTH command will be required with a - # username and password. Authenticate Redis client connections locally by - # forcing downstream clients to issue a `Redis AUTH command - # `_ with this username and the - # *downstream_auth_password* before enabling any other command. If an AUTH - # command's username and password matches this username and the - # *downstream_auth_password* , an "OK" response will be returned to the - # client. If the AUTH command username or password does not match this - # username or the *downstream_auth_password*, then an "WRONGPASS invalid - # username-password pair" error will be returned. If any other command is - # received before AUTH when this password is set, then a "NOAUTH - # Authentication required." error response will be sent to the client. 
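For readers following the wrapped YAML examples in the comments above (prefix routing and fault injection), the same configuration can be built with the generated dataclasses directly. A minimal sketch, assuming the regenerated protobufs keep the module layout and class names of the file removed here:

from datetime import timedelta

from envoy_data_plane.envoy.extensions.filters.network.redis_proxy.v3 import (
    RedisProxy,
    RedisProxyConnPoolSettings,
    RedisProxyPrefixRoutes,
    RedisProxyPrefixRoutesRoute,
    RedisProxyRedisFault,
    RedisProxyRedisFaultRedisFaultType,
)

# Longest-prefix match wins: "get abc:users" is routed to cluster_b,
# "get ab:users" to cluster_a, and anything else to the catch-all cluster.
proxy = RedisProxy(
    stat_prefix="redis_stats",
    settings=RedisProxyConnPoolSettings(op_timeout=timedelta(seconds=5)),
    prefix_routes=RedisProxyPrefixRoutes(
        routes=[
            RedisProxyPrefixRoutesRoute(prefix="ab", cluster="cluster_a"),
            RedisProxyPrefixRoutesRoute(prefix="abc", cluster="cluster_b"),
        ],
        catch_all_route=RedisProxyPrefixRoutesRoute(cluster="cluster_default"),
    ),
    # A 2s delay fault restricted to GET commands; fault_enabled (a
    # RuntimeFractionalPercent from envoy.config.core.v3) would normally be
    # set as well to limit the percentage of requests affected.
    faults=[
        RedisProxyRedisFault(
            fault_type=RedisProxyRedisFaultRedisFaultType.DELAY,
            delay=timedelta(seconds=2),
            commands=["GET"],
        )
    ],
)

# betterproto messages serialize to the protobuf wire format via bytes().
wire = bytes(proxy)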
If an - # AUTH command is received when the password is not set, then an "ERR Client - # sent AUTH, but no ACL is set" error will be returned. - downstream_auth_username: "_____config_core_v3__.DataSource" = ( - betterproto.message_field(7) - ) - - -@dataclass(eq=False, repr=False) -class RedisProxyConnPoolSettings(betterproto.Message): - """Redis connection pool settings. [#next-free-field: 9]""" - - # Per-operation timeout in milliseconds. The timer starts when the first - # command of a pipeline is written to the backend connection. Each response - # received from Redis resets the timer since it signifies that the next - # command is being processed by the backend. The only exception to this - # behavior is when a connection to a backend is not yet established. In that - # case, the connect timeout on the cluster will govern the timeout until the - # connection is ready. - op_timeout: timedelta = betterproto.message_field(1) - # Use hash tagging on every redis key to guarantee that keys with the same - # hash tag will be forwarded to the same upstream. The hash key used for - # determining the upstream in a consistent hash ring configuration will be - # computed from the hash tagged key instead of the whole key. The algorithm - # used to compute the hash tag is identical to the `redis-cluster - # implementation `_. - # Examples: * '{user1000}.following' and '{user1000}.followers' **will** be - # sent to the same upstream * '{user1000}.following' and - # '{user1001}.following' **might** be sent to the same upstream - enable_hashtagging: bool = betterproto.bool_field(2) - # Accept `moved and ask redirection `_ errors from upstream redis servers, and - # retry commands to the specified target server. The target server does not - # need to be known to the cluster manager. If the command cannot be - # redirected, then the original error is passed downstream unchanged. By - # default, this support is not enabled. - enable_redirection: bool = betterproto.bool_field(3) - # Maximum size of encoded request buffer before flush is triggered and - # encoded requests are sent upstream. If this is unset, the buffer flushes - # whenever it receives data and performs no batching. This feature makes it - # possible for multiple clients to send requests to Envoy and have them - # batched- for example if one is running several worker processes, each with - # its own Redis connection. There is no benefit to using this with a single - # downstream process. Recommended size (if enabled) is 1024 bytes. - max_buffer_size_before_flush: int = betterproto.uint32_field(4) - # The encoded request buffer is flushed N milliseconds after the first - # request has been encoded, unless the buffer size has already exceeded - # `max_buffer_size_before_flush`. If `max_buffer_size_before_flush` is not - # set, this flush timer is not used. Otherwise, the timer should be set - # according to the number of clients, overall request rate and desired - # maximum latency for a single command. For example, if there are many - # requests being batched together at a high rate, the buffer will likely be - # filled before the timer fires. Alternatively, if the request rate is lower - # the buffer will not be filled as often before the timer fires. If - # `max_buffer_size_before_flush` is set, but `buffer_flush_timeout` is not, - # the latter defaults to 3ms. 
- buffer_flush_timeout: timedelta = betterproto.message_field(5) - # `max_upstream_unknown_connections` controls how many upstream connections - # to unknown hosts can be created at any given time by any given worker - # thread (see `enable_redirection` for more details). If the host is unknown - # and a connection cannot be created due to enforcing this limit, then - # redirection will fail and the original redirection error will be passed - # downstream unchanged. This limit defaults to 100. - max_upstream_unknown_connections: Optional[int] = betterproto.message_field( - 6, wraps=betterproto.TYPE_UINT32 - ) - # Enable per-command statistics per upstream cluster, in addition to the - # filter level aggregate count. These commands are measured in microseconds. - enable_command_stats: bool = betterproto.bool_field(8) - # Read policy. The default is to read from the primary. - read_policy: "RedisProxyConnPoolSettingsReadPolicy" = betterproto.enum_field(7) - - -@dataclass(eq=False, repr=False) -class RedisProxyPrefixRoutes(betterproto.Message): - # List of prefix routes. - routes: List["RedisProxyPrefixRoutesRoute"] = betterproto.message_field(1) - # Indicates that prefix matching should be case insensitive. - case_insensitive: bool = betterproto.bool_field(2) - # Optional catch-all route to forward commands that doesn't match any of the - # routes. The catch-all route becomes required when no routes are specified. - catch_all_route: "RedisProxyPrefixRoutesRoute" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class RedisProxyPrefixRoutesRoute(betterproto.Message): - # String prefix that must match the beginning of the keys. Envoy will always - # favor the longest match. - prefix: str = betterproto.string_field(1) - # Indicates if the prefix needs to be removed from the key when forwarded. - remove_prefix: bool = betterproto.bool_field(2) - # Upstream cluster to forward the command to. - cluster: str = betterproto.string_field(3) - # Indicates that the route has a request mirroring policy. - request_mirror_policy: List[ - "RedisProxyPrefixRoutesRouteRequestMirrorPolicy" - ] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class RedisProxyPrefixRoutesRouteRequestMirrorPolicy(betterproto.Message): - """ - The router is capable of shadowing traffic from one cluster to another. The - current implementation is "fire and forget," meaning Envoy will not wait - for the shadow cluster to respond before returning the response from the - primary cluster. All normal statistics are collected for the shadow cluster - making this feature useful for testing. - """ - - # Specifies the cluster that requests will be mirrored to. The cluster must - # exist in the cluster manager configuration. - cluster: str = betterproto.string_field(1) - # If not specified or the runtime key is not present, all requests to the - # target cluster will be mirrored. If specified, Envoy will lookup the - # runtime key to get the percentage of requests to the mirror. - runtime_fraction: "_____config_core_v3__.RuntimeFractionalPercent" = ( - betterproto.message_field(2) - ) - # Set this to TRUE to only mirror write commands, this is effectively - # replicating the writes in a "fire and forget" manner. - exclude_read_commands: bool = betterproto.bool_field(3) - - -@dataclass(eq=False, repr=False) -class RedisProxyRedisFault(betterproto.Message): - """RedisFault defines faults used for fault injection.""" - - # Fault type. 
- fault_type: "RedisProxyRedisFaultRedisFaultType" = betterproto.enum_field(1) - # Percentage of requests fault applies to. - fault_enabled: "_____config_core_v3__.RuntimeFractionalPercent" = ( - betterproto.message_field(2) - ) - # Delay for all faults. If not set, defaults to zero - delay: timedelta = betterproto.message_field(3) - # Commands fault is restricted to, if any. If not set, fault applies to all - # commands other than auth and ping (due to special handling of those - # commands in Envoy). - commands: List[str] = betterproto.string_field(4) - - -@dataclass(eq=False, repr=False) -class RedisProtocolOptions(betterproto.Message): - """ - RedisProtocolOptions specifies Redis upstream protocol options. This object - is used in :ref:`typed_extension_protocol_options`, keyed by the name - `envoy.filters.network.redis_proxy`. - """ - - # Upstream server password as defined by the `requirepass` directive - # `_ in the server's configuration file. - auth_password: "_____config_core_v3__.DataSource" = betterproto.message_field(1) - # Upstream server username as defined by the `user` directive - # `_ in the server's configuration file. - auth_username: "_____config_core_v3__.DataSource" = betterproto.message_field(2) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/sni_cluster/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/sni_cluster/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/sni_cluster/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/sni_cluster/v3/__init__.py deleted file mode 100644 index d11a231..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/sni_cluster/v3/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class SniCluster(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/sni_dynamic_forward_proxy/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/sni_dynamic_forward_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/__init__.py deleted file mode 100644 index 2b8a9c3..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FilterConfig(betterproto.Message): - """ - Configuration for the SNI-based dynamic forward proxy filter. See the - :ref:`architecture overview ` for - more information. Note this filter must be configured along with :ref:`TLS - inspector listener filter ` to work. 
- [#extension: envoy.filters.network.sni_dynamic_forward_proxy] - """ - - # The DNS cache configuration that the filter will attach to. Note this - # configuration must match that of associated :ref:`dynamic forward proxy - # cluster configuration `. - dns_cache_config: "____common_dynamic_forward_proxy_v3__.DnsCacheConfig" = ( - betterproto.message_field(1) - ) - # The port number to connect to the upstream. - port_value: int = betterproto.uint32_field(2, group="port_specifier") - - -from .....common.dynamic_forward_proxy import ( - v3 as ____common_dynamic_forward_proxy_v3__, -) diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/tcp_proxy/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/tcp_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/tcp_proxy/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/tcp_proxy/v3/__init__.py deleted file mode 100644 index 0d4ebfb..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/tcp_proxy/v3/__init__.py +++ /dev/null @@ -1,130 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class TcpProxy(betterproto.Message): - """[#next-free-field: 14]""" - - # The prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(1) - # The upstream cluster to connect to. - cluster: str = betterproto.string_field(2, group="cluster_specifier") - # Multiple upstream clusters can be specified for a given route. The request - # is routed to one of the upstream clusters based on weights assigned to each - # cluster. - weighted_clusters: "TcpProxyWeightedCluster" = betterproto.message_field( - 10, group="cluster_specifier" - ) - # Optional endpoint metadata match criteria. Only endpoints in the upstream - # cluster with metadata matching that set in metadata_match will be - # considered. The filter name should be specified as *envoy.lb*. - metadata_match: "_____config_core_v3__.Metadata" = betterproto.message_field(9) - # The idle timeout for connections managed by the TCP proxy filter. The idle - # timeout is defined as the period in which there are no bytes sent or - # received on either the upstream or downstream connection. If not set, the - # default idle timeout is 1 hour. If set to 0s, the timeout will be disabled. - # .. warning:: Disabling this timeout has a highly likelihood of yielding - # connection leaks due to lost TCP FIN packets, etc. - idle_timeout: timedelta = betterproto.message_field(8) - # [#not-implemented-hide:] The idle timeout for connections managed by the - # TCP proxy filter. The idle timeout is defined as the period in which there - # is no active traffic. If not set, there is no idle timeout. When the idle - # timeout is reached the connection will be closed. The distinction between - # downstream_idle_timeout/upstream_idle_timeout provides a means to set - # timeout based on the last byte sent on the downstream/upstream connection. 
- downstream_idle_timeout: timedelta = betterproto.message_field(3) - # [#not-implemented-hide:] - upstream_idle_timeout: timedelta = betterproto.message_field(4) - # Configuration for :ref:`access logs ` emitted by - # the this tcp_proxy. - access_log: List[ - "_____config_accesslog_v3__.AccessLog" - ] = betterproto.message_field(5) - # The maximum number of unsuccessful connection attempts that will be made - # before giving up. If the parameter is not specified, 1 connection attempt - # will be made. - max_connect_attempts: Optional[int] = betterproto.message_field( - 7, wraps=betterproto.TYPE_UINT32 - ) - # Optional configuration for TCP proxy hash policy. If hash_policy is not - # set, the hash-based load balancing algorithms will select a host randomly. - # Currently the number of hash policies is limited to 1. - hash_policy: List["_____type_v3__.HashPolicy"] = betterproto.message_field(11) - # If set, this configures tunneling, e.g. configuration options to tunnel TCP - # payload over HTTP CONNECT. If this message is absent, the payload will be - # proxied upstream as per usual. - tunneling_config: "TcpProxyTunnelingConfig" = betterproto.message_field(12) - # The maximum duration of a connection. The duration is defined as the period - # since a connection was established. If not set, there is no max duration. - # When max_downstream_connection_duration is reached the connection will be - # closed. Duration must be at least 1ms. - max_downstream_connection_duration: timedelta = betterproto.message_field(13) - - -@dataclass(eq=False, repr=False) -class TcpProxyWeightedCluster(betterproto.Message): - """ - Allows for specification of multiple upstream clusters along with weights - that indicate the percentage of traffic to be forwarded to each cluster. - The router selects an upstream cluster based on these weights. - """ - - # Specifies one or more upstream clusters associated with the route. - clusters: List["TcpProxyWeightedClusterClusterWeight"] = betterproto.message_field( - 1 - ) - - -@dataclass(eq=False, repr=False) -class TcpProxyWeightedClusterClusterWeight(betterproto.Message): - # Name of the upstream cluster. - name: str = betterproto.string_field(1) - # When a request matches the route, the choice of an upstream cluster is - # determined by its weight. The sum of weights across all entries in the - # clusters array determines the total weight. - weight: int = betterproto.uint32_field(2) - # Optional endpoint metadata match criteria used by the subset load balancer. - # Only endpoints in the upstream cluster with metadata matching what is set - # in this field will be considered for load balancing. Note that this will be - # merged with what's provided in :ref:`TcpProxy.metadata_match `, - # with values here taking precedence. The filter name should be specified as - # *envoy.lb*. - metadata_match: "_____config_core_v3__.Metadata" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class TcpProxyTunnelingConfig(betterproto.Message): - """ - Configuration for tunneling TCP over other transports or application - layers. Tunneling is supported over both HTTP/1.1 and HTTP/2. Upstream - protocol is determined by the cluster configuration. - """ - - # The hostname to send in the synthesized CONNECT headers to the upstream - # proxy. - hostname: str = betterproto.string_field(1) - # Use POST method instead of CONNECT method to tunnel the TCP stream. The - # 'protocol: bytestream' header is also NOT set for HTTP/2 to comply with the - # spec. 
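The weighted-cluster split described above can be expressed with the generated TcpProxy dataclasses. A minimal sketch, under the same assumption that the regenerated package keeps this module layout:

from datetime import timedelta

from envoy_data_plane.envoy.extensions.filters.network.tcp_proxy.v3 import (
    TcpProxy,
    TcpProxyWeightedCluster,
    TcpProxyWeightedClusterClusterWeight,
)

# 80/20 split across two upstream clusters, with the default 1h idle timeout
# tightened to 10 minutes. cluster and weighted_clusters are alternatives in
# the cluster_specifier oneof, so only one of them is set.
tcp_proxy = TcpProxy(
    stat_prefix="tcp_stats",
    weighted_clusters=TcpProxyWeightedCluster(
        clusters=[
            TcpProxyWeightedClusterClusterWeight(name="primary", weight=80),
            TcpProxyWeightedClusterClusterWeight(name="canary", weight=20),
        ]
    ),
    idle_timeout=timedelta(minutes=10),
)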
The upstream proxy is expected to convert POST payload as raw TCP. - use_post: bool = betterproto.bool_field(2) - # Additional request headers to upstream proxy. This is mainly used to - # trigger upstream to convert POST requests back to CONNECT requests. Neither - # *:-prefixed* pseudo-headers nor the Host: header can be overridden. - headers_to_add: List[ - "_____config_core_v3__.HeaderValueOption" - ] = betterproto.message_field(3) - - -from ......config.accesslog import v3 as _____config_accesslog_v3__ -from ......config.core import v3 as _____config_core_v3__ -from ......type import v3 as _____type_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/header_to_metadata/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/header_to_metadata/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/header_to_metadata/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/header_to_metadata/v3/__init__.py deleted file mode 100644 index 9039b89..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/header_to_metadata/v3/__init__.py +++ /dev/null @@ -1,76 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/thrift_proxy/filters/header_to_metadata/v3/header_to_metadata.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class HeaderToMetadataValueType(betterproto.Enum): - STRING = 0 - NUMBER = 1 - PROTOBUF_VALUE = 2 - - -class HeaderToMetadataValueEncode(betterproto.Enum): - NONE = 0 - BASE64 = 1 - - -@dataclass(eq=False, repr=False) -class HeaderToMetadata(betterproto.Message): - # The list of rules to apply to requests. - request_rules: List["HeaderToMetadataRule"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class HeaderToMetadataKeyValuePair(betterproto.Message): - """[#next-free-field: 7]""" - - # The namespace — if this is empty, the filter's namespace will be used. - metadata_namespace: str = betterproto.string_field(1) - # The key to use within the namespace. - key: str = betterproto.string_field(2) - # The value to pair with the given key. When used for on_present case, if - # value is non-empty it'll be used instead of the header value. If both are - # empty, no metadata is added. When used for on_missing case, a non-empty - # value must be provided otherwise no metadata is added. - value: str = betterproto.string_field(3, group="value_type") - # If present, the header's value will be matched and substituted with this. - # If there is no match or substitution, the header value is used as-is. This - # is only used for on_present. Note: if the `value` field is non-empty this - # field should be empty. 
- regex_value_rewrite: "_______type_matcher_v3__.RegexMatchAndSubstitute" = ( - betterproto.message_field(4, group="value_type") - ) - # The value's type — defaults to string. - type: "HeaderToMetadataValueType" = betterproto.enum_field(5) - # How is the value encoded, default is NONE (not encoded). The value will be - # decoded accordingly before storing to metadata. - encode: "HeaderToMetadataValueEncode" = betterproto.enum_field(6) - - -@dataclass(eq=False, repr=False) -class HeaderToMetadataRule(betterproto.Message): - """ - A Rule defines what metadata to apply when a header is present or missing. - """ - - # Specifies that a match will be performed on the value of a header. The - # header to be extracted. - header: str = betterproto.string_field(1) - # If the header is present, apply this metadata KeyValuePair. If the value in - # the KeyValuePair is non-empty, it'll be used instead of the header value. - on_present: "HeaderToMetadataKeyValuePair" = betterproto.message_field(2) - # If the header is not present, apply this metadata KeyValuePair. The value - # in the KeyValuePair must be set, since it'll be used in lieu of the missing - # header value. - on_missing: "HeaderToMetadataKeyValuePair" = betterproto.message_field(3) - # Whether or not to remove the header after a rule is applied. This prevents - # headers from leaking. - remove: bool = betterproto.bool_field(4) - - -from ........type.matcher import v3 as _______type_matcher_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/__init__.py deleted file mode 100644 index 7998c91..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RateLimit(betterproto.Message): - """[#next-free-field: 6]""" - - # The rate limit domain to use in the rate limit service request. - domain: str = betterproto.string_field(1) - # Specifies the rate limit configuration stage. Each configured rate limit - # filter performs a rate limit check using descriptors configured in the :ref - # :`envoy_v3_api_msg_extensions.filters.network.thrift_proxy.v3.RouteAction` - # for the request. Only those entries with a matching stage number are used - # for a given filter. If not set, the default stage number is 0. .. note:: - # The filter supports a range of 0 - 10 inclusively for stage numbers. - stage: int = betterproto.uint32_field(2) - # The timeout in milliseconds for the rate limit service RPC. If not set, - # this defaults to 20ms. - timeout: timedelta = betterproto.message_field(3) - # The filter's behaviour in case the rate limiting service does not respond - # back. 
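As an illustration of the rule semantics above, a header-to-metadata config that copies a Thrift transport header into dynamic metadata might look like the following sketch (module path assumed to match the removed file):

from envoy_data_plane.envoy.extensions.filters.network.thrift_proxy.filters.header_to_metadata.v3 import (
    HeaderToMetadata,
    HeaderToMetadataKeyValuePair,
    HeaderToMetadataRule,
    HeaderToMetadataValueType,
)

# Copy the "x-version" header into dynamic metadata under the "envoy.lb"
# namespace, then strip the header so it does not leak upstream.
header_to_metadata = HeaderToMetadata(
    request_rules=[
        HeaderToMetadataRule(
            header="x-version",
            on_present=HeaderToMetadataKeyValuePair(
                metadata_namespace="envoy.lb",
                key="version",
                type=HeaderToMetadataValueType.STRING,
            ),
            remove=True,
        )
    ]
)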
When it is set to true, Envoy will not allow traffic in case of - # communication failure between rate limiting service and the proxy. Defaults - # to false. - failure_mode_deny: bool = betterproto.bool_field(4) - # Configuration for an external rate limit service provider. If not - # specified, any calls to the rate limit service will immediately return - # success. - rate_limit_service: "_______config_ratelimit_v3__.RateLimitServiceConfig" = ( - betterproto.message_field(5) - ) - - -from ........config.ratelimit import v3 as _______config_ratelimit_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/router/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/router/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/router/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/router/v3/__init__.py deleted file mode 100644 index 3d0c517..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/router/v3/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/thrift_proxy/router/v3/router.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Router(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/v3/__init__.py deleted file mode 100644 index d65076d..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/thrift_proxy/v3/__init__.py +++ /dev/null @@ -1,264 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/thrift_proxy/v3/route.proto, envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class TransportType(betterproto.Enum): - """Thrift transport types supported by Envoy.""" - - # For downstream connections, the Thrift proxy will attempt to determine - # which transport to use. For upstream connections, the Thrift proxy will use - # same transport as the downstream connection. - AUTO_TRANSPORT = 0 - # The Thrift proxy will use the Thrift framed transport. - FRAMED = 1 - # The Thrift proxy will use the Thrift unframed transport. - UNFRAMED = 2 - # The Thrift proxy will assume the client is using the Thrift header - # transport. - HEADER = 3 - - -class ProtocolType(betterproto.Enum): - """Thrift Protocol types supported by Envoy.""" - - # For downstream connections, the Thrift proxy will attempt to determine - # which protocol to use. Note that the older, non-strict (or lax) binary - # protocol is not included in automatic protocol detection. For upstream - # connections, the Thrift proxy will use the same protocol as the downstream - # connection. - AUTO_PROTOCOL = 0 - # The Thrift proxy will use the Thrift binary protocol. - BINARY = 1 - # The Thrift proxy will use Thrift non-strict binary protocol. - LAX_BINARY = 2 - # The Thrift proxy will use the Thrift compact protocol. 
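A corresponding rate limit filter config, sketched with the fields shown above; the rate_limit_service provider is omitted for brevity, and per the comment above the check is a no-op until it is set:

from datetime import timedelta

from envoy_data_plane.envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3 import (
    RateLimit,
)

# Stage-0 rate limit check against the "thrift_backend" domain, failing closed
# if the rate limit service does not respond.
rate_limit = RateLimit(
    domain="thrift_backend",
    stage=0,
    timeout=timedelta(milliseconds=20),
    failure_mode_deny=True,
)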
- COMPACT = 3 - # The Thrift proxy will use the Thrift "Twitter" protocol implemented by the - # finagle library. - TWITTER = 4 - - -@dataclass(eq=False, repr=False) -class RouteConfiguration(betterproto.Message): - # The name of the route configuration. Reserved for future use in - # asynchronous route discovery. - name: str = betterproto.string_field(1) - # The list of routes that will be matched, in order, against incoming - # requests. The first route that matches will be used. - routes: List["Route"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Route(betterproto.Message): - # Route matching parameters. - match: "RouteMatch" = betterproto.message_field(1) - # Route request to some upstream cluster. - route: "RouteAction" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class RouteMatch(betterproto.Message): - # If specified, the route must exactly match the request method name. As a - # special case, an empty string matches any request method name. - method_name: str = betterproto.string_field(1, group="match_specifier") - # If specified, the route must have the service name as the request method - # name prefix. As a special case, an empty string matches any service name. - # Only relevant when service multiplexing. - service_name: str = betterproto.string_field(2, group="match_specifier") - # Inverts whatever matching is done in the :ref:`method_name ` or - # :ref:`service_name ` fields. Cannot be combined with wildcard - # matching as that would result in routes never being matched. .. note:: - # This does not invert matching done as part of the :ref:`headers field ` field. To invert header matching, see :ref:`invert_match - # `. - invert: bool = betterproto.bool_field(3) - # Specifies a set of headers that the route should match on. The router will - # check the request’s headers against all the specified headers in the route - # config. A match will happen if all the headers in the route are present in - # the request with the same values (or based on presence if the value field - # is not in the config). Note that this only applies for Thrift transports - # and/or protocols that support headers. - headers: List["_____config_route_v3__.HeaderMatcher"] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class RouteAction(betterproto.Message): - """[#next-free-field: 8]""" - - # Indicates a single upstream cluster to which the request should be routed - # to. - cluster: str = betterproto.string_field(1, group="cluster_specifier") - # Multiple upstream clusters can be specified for a given route. The request - # is routed to one of the upstream clusters based on weights assigned to each - # cluster. - weighted_clusters: "WeightedCluster" = betterproto.message_field( - 2, group="cluster_specifier" - ) - # Envoy will determine the cluster to route to by reading the value of the - # Thrift header named by cluster_header from the request headers. If the - # header is not found or the referenced cluster does not exist Envoy will - # respond with an unknown method exception or an internal error exception, - # respectively. - cluster_header: str = betterproto.string_field(6, group="cluster_specifier") - # Optional endpoint metadata match criteria used by the subset load balancer. - # Only endpoints in the upstream cluster with metadata matching what is set - # in this field will be considered. 
Note that this will be merged with what's - # provided in :ref:`WeightedCluster.metadata_match `, with values there taking precedence. Keys and values should be - # provided under the "envoy.lb" metadata key. - metadata_match: "_____config_core_v3__.Metadata" = betterproto.message_field(3) - # Specifies a set of rate limit configurations that could be applied to the - # route. N.B. Thrift service or method name matching can be achieved by - # specifying a RequestHeaders action with the header name ":method-name". - rate_limits: List["_____config_route_v3__.RateLimit"] = betterproto.message_field(4) - # Strip the service prefix from the method name, if there's a prefix. For - # example, the method call Service:method would end up being just method. - strip_service_name: bool = betterproto.bool_field(5) - # Indicates that the route has request mirroring policies. - request_mirror_policies: List[ - "RouteActionRequestMirrorPolicy" - ] = betterproto.message_field(7) - - -@dataclass(eq=False, repr=False) -class RouteActionRequestMirrorPolicy(betterproto.Message): - """ - The router is capable of shadowing traffic from one cluster to another. The - current implementation is "fire and forget," meaning Envoy will not wait - for the shadow cluster to respond before returning the response from the - primary cluster. All normal statistics are collected for the shadow cluster - making this feature useful for testing. .. note:: Shadowing will not be - triggered if the primary cluster does not exist. - """ - - # Specifies the cluster that requests will be mirrored to. The cluster must - # exist in the cluster manager configuration when the route configuration is - # loaded. If it disappears at runtime, the shadow request will silently be - # ignored. - cluster: str = betterproto.string_field(1) - # If not specified, all requests to the target cluster will be mirrored. For - # some fraction N/D, a random number in the range [0,D) is selected. If the - # number is <= the value of the numerator N, or if the key is not present, - # the default value, the request will be mirrored. - runtime_fraction: "_____config_core_v3__.RuntimeFractionalPercent" = ( - betterproto.message_field(2) - ) - - -@dataclass(eq=False, repr=False) -class WeightedCluster(betterproto.Message): - """ - Allows for specification of multiple upstream clusters along with weights - that indicate the percentage of traffic to be forwarded to each cluster. - The router selects an upstream cluster based on these weights. - """ - - # Specifies one or more upstream clusters associated with the route. - clusters: List["WeightedClusterClusterWeight"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class WeightedClusterClusterWeight(betterproto.Message): - # Name of the upstream cluster. - name: str = betterproto.string_field(1) - # When a request matches the route, the choice of an upstream cluster is - # determined by its weight. The sum of weights across all entries in the - # clusters array determines the total weight. - weight: Optional[int] = betterproto.message_field(2, wraps=betterproto.TYPE_UINT32) - # Optional endpoint metadata match criteria used by the subset load balancer. - # Only endpoints in the upstream cluster with metadata matching what is set - # in this field, combined with what's provided in :ref:`RouteAction's - # metadata_match `, will be considered. Values here will take - # precedence. Keys and values should be provided under the "envoy.lb" - # metadata key. 
- metadata_match: "_____config_core_v3__.Metadata" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ThriftProxy(betterproto.Message): - """[#next-free-field: 8]""" - - # Supplies the type of transport that the Thrift proxy should use. Defaults - # to :ref:`AUTO_TRANSPORT`. - transport: "TransportType" = betterproto.enum_field(2) - # Supplies the type of protocol that the Thrift proxy should use. Defaults to - # :ref:`AUTO_PROTOCOL`. - protocol: "ProtocolType" = betterproto.enum_field(3) - # The human readable prefix to use when emitting statistics. - stat_prefix: str = betterproto.string_field(1) - # The route table for the connection manager is static and is specified in - # this property. - route_config: "RouteConfiguration" = betterproto.message_field(4) - # A list of individual Thrift filters that make up the filter chain for - # requests made to the Thrift proxy. Order matters as the filters are - # processed sequentially. For backwards compatibility, if no thrift_filters - # are specified, a default Thrift router filter - # (`envoy.filters.thrift.router`) is used. [#extension-category: - # envoy.thrift_proxy.filters] - thrift_filters: List["ThriftFilter"] = betterproto.message_field(5) - # If set to true, Envoy will try to skip decode data after metadata in the - # Thrift message. This mode will only work if the upstream and downstream - # protocols are the same and the transports are Framed or Header, and the - # protocol is not Twitter. Otherwise Envoy will fallback to decode the data. - payload_passthrough: bool = betterproto.bool_field(6) - # Optional maximum requests for a single downstream connection. If not - # specified, there is no limit. - max_requests_per_connection: Optional[int] = betterproto.message_field( - 7, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class ThriftFilter(betterproto.Message): - """ThriftFilter configures a Thrift filter.""" - - # The name of the filter to instantiate. The name must match a supported - # filter. The built-in filters are: [#comment:TODO(zuercher): Auto generate - # the following list] * :ref:`envoy.filters.thrift.router - # ` * :ref:`envoy.filters.thrift.rate_limit - # ` - name: str = betterproto.string_field(1) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - -@dataclass(eq=False, repr=False) -class ThriftProtocolOptions(betterproto.Message): - """ - ThriftProtocolOptions specifies Thrift upstream protocol options. This - object is used in in :ref:`typed_extension_protocol_options`, keyed by - the name `envoy.filters.network.thrift_proxy`. - """ - - # Supplies the type of transport that the Thrift proxy should use for - # upstream connections. Selecting :ref:`AUTO_TRANSPORT` - # , which is the default, causes the proxy to use the same transport as the - # downstream connection. - transport: "TransportType" = betterproto.enum_field(1) - # Supplies the type of protocol that the Thrift proxy should use for upstream - # connections. Selecting :ref:`AUTO_PROTOCOL`, which is - # the default, causes the proxy to use the same protocol as the downstream - # connection. 
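Putting the route pieces together, a minimal ThriftProxy with a static route table could be assembled roughly as follows (same module-layout assumption as the earlier sketches):

from envoy_data_plane.envoy.extensions.filters.network.thrift_proxy.v3 import (
    ProtocolType,
    Route,
    RouteAction,
    RouteConfiguration,
    RouteMatch,
    ThriftProxy,
    TransportType,
)

# Calls prefixed with the "UserService" service name go to the user_service
# cluster; the empty method_name match acts as a catch-all for everything else.
thrift_proxy = ThriftProxy(
    stat_prefix="thrift_stats",
    transport=TransportType.AUTO_TRANSPORT,
    protocol=ProtocolType.AUTO_PROTOCOL,
    route_config=RouteConfiguration(
        name="local_routes",
        routes=[
            Route(
                match=RouteMatch(service_name="UserService"),
                route=RouteAction(cluster="user_service", strip_service_name=True),
            ),
            Route(
                match=RouteMatch(method_name=""),
                route=RouteAction(cluster="default_backend"),
            ),
        ],
    ),
)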
- protocol: "ProtocolType" = betterproto.enum_field(2) - - -from ......config.core import v3 as _____config_core_v3__ -from ......config.route import v3 as _____config_route_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/wasm/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/wasm/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/wasm/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/wasm/v3/__init__.py deleted file mode 100644 index 088ea5d..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/wasm/v3/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/wasm/v3/wasm.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Wasm(betterproto.Message): - # General Plugin configuration. - config: "____wasm_v3__.PluginConfig" = betterproto.message_field(1) - - -from .....wasm import v3 as ____wasm_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/zookeeper_proxy/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/zookeeper_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/network/zookeeper_proxy/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/network/zookeeper_proxy/v3/__init__.py deleted file mode 100644 index 8bb7283..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/network/zookeeper_proxy/v3/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ZooKeeperProxy(betterproto.Message): - # The human readable prefix to use when emitting :ref:`statistics - # `. - stat_prefix: str = betterproto.string_field(1) - # [#not-implemented-hide:] The optional path to use for writing ZooKeeper - # access logs. If the access log field is empty, access logs will not be - # written. - access_log: str = betterproto.string_field(2) - # Messages — requests, responses and events — that are bigger than this value - # will be ignored. If it is not set, the default value is 1Mb. The value here - # should match the jute.maxbuffer property in your cluster configuration: - # https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#Unsafe+Options - # if that is set. If it isn't, ZooKeeper's default is also 1Mb. 
- max_packet_bytes: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) diff --git a/src/envoy_data_plane/envoy/extensions/filters/udp/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/udp/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/udp/dns_filter/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/udp/dns_filter/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/udp/dns_filter/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/udp/dns_filter/v3/__init__.py deleted file mode 100644 index b96e7c6..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/udp/dns_filter/v3/__init__.py +++ /dev/null @@ -1,116 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/udp/dns_filter/v3/dns_filter.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class DnsFilterConfig(betterproto.Message): - """Configuration for the DNS filter.""" - - # The stat prefix used when emitting DNS filter statistics - stat_prefix: str = betterproto.string_field(1) - # Server context configuration contains the data that the filter uses to - # respond to DNS requests. - server_config: "DnsFilterConfigServerContextConfig" = betterproto.message_field(2) - # Client context configuration controls Envoy's behavior when it must use - # external resolvers to answer a query. This object is optional and if - # omitted instructs the filter to resolve queries from the data in the - # server_config - client_config: "DnsFilterConfigClientContextConfig" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class DnsFilterConfigServerContextConfig(betterproto.Message): - """ - This message contains the configuration for the DNS Filter operating in a - server context. This message will contain the virtual hosts and associated - addresses with which Envoy will respond to queries - """ - - # Load the configuration specified from the control plane - inline_dns_table: "_____data_dns_v3__.DnsTable" = betterproto.message_field( - 1, group="config_source" - ) - # Seed the filter configuration from an external path. This source is a yaml - # formatted file that contains the DnsTable driving Envoy's responses to DNS - # queries - external_dns_table: "_____config_core_v3__.DataSource" = betterproto.message_field( - 2, group="config_source" - ) - - -@dataclass(eq=False, repr=False) -class DnsFilterConfigClientContextConfig(betterproto.Message): - """ - This message contains the configuration for the DNS Filter operating in a - client context. This message will contain the timeouts, retry, and - forwarding configuration for Envoy to make DNS requests to other resolvers - [#next-free-field: 6] - """ - - # Sets the maximum time we will wait for the upstream query to complete We - # allow 5s for the upstream resolution to complete, so the minimum value here - # is 1. Note that the total latency for a failed query is the number of - # retries multiplied by the resolver_timeout. - resolver_timeout: timedelta = betterproto.message_field(1) - # This field was used for `dns_resolution_config` in Envoy 1.19.0 and 1.19.1. 
- # Control planes that need to set this field for Envoy 1.19.0 and 1.19.1 - # clients should fork the protobufs and change the field type to - # `DnsResolutionConfig`. Control planes that need to simultaneously support - # Envoy 1.18.x and Envoy 1.19.x should avoid Envoy 1.19.0 and 1.19.1. [#not- - # implemented-hide:] - upstream_resolvers: List[ - "_____config_core_v3__.Address" - ] = betterproto.message_field(2) - # DNS resolution configuration which includes the underlying dns resolver - # addresses and options. This field is deprecated in favor of - # :ref:`typed_dns_resolver_config `. - dns_resolution_config: "_____config_core_v3__.DnsResolutionConfig" = ( - betterproto.message_field(5) - ) - # DNS resolver type configuration extension. This extension can be used to - # configure c-ares, apple, or any other DNS resolver types and the related - # parameters. For example, an object of :ref:`CaresDnsResolverConfig ` - # can be packed into this *typed_dns_resolver_config*. This configuration - # replaces the :ref:`dns_resolution_config ` configuration. During the transition period when both - # *dns_resolution_config* and *typed_dns_resolver_config* exists, when - # *typed_dns_resolver_config* is in place, Envoy will use it and ignore - # *dns_resolution_config*. When *typed_dns_resolver_config* is missing, the - # default behavior is in place. [#extension-category: - # envoy.network.dns_resolver] - typed_dns_resolver_config: "_____config_core_v3__.TypedExtensionConfig" = ( - betterproto.message_field(4) - ) - # Controls how many outstanding external lookup contexts the filter tracks. - # The context structure allows the filter to respond to every query even if - # the external resolution times out or is otherwise unsuccessful - max_pending_lookups: int = betterproto.uint64_field(3) - - def __post_init__(self) -> None: - super().__post_init__() - if self.upstream_resolvers: - warnings.warn( - "DnsFilterConfigClientContextConfig.upstream_resolvers is deprecated", - DeprecationWarning, - ) - if self.dns_resolution_config: - warnings.warn( - "DnsFilterConfigClientContextConfig.dns_resolution_config is deprecated", - DeprecationWarning, - ) - - -from ......config.core import v3 as _____config_core_v3__ -from ......data.dns import v3 as _____data_dns_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/filters/udp/udp_proxy/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/udp/udp_proxy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/filters/udp/udp_proxy/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/filters/udp/udp_proxy/v3/__init__.py deleted file mode 100644 index c90d81d..0000000 --- a/src/envoy_data_plane/envoy/extensions/filters/udp/udp_proxy/v3/__init__.py +++ /dev/null @@ -1,80 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class UdpProxyConfig(betterproto.Message): - """Configuration for the UDP proxy filter. [#next-free-field: 8]""" - - # The stat prefix used when emitting UDP proxy filter stats. - stat_prefix: str = betterproto.string_field(1) - # The upstream cluster to connect to. 
- cluster: str = betterproto.string_field(2, group="route_specifier") - # The idle timeout for sessions. Idle is defined as no datagrams between - # received or sent by the session. The default if not specified is 1 minute. - idle_timeout: timedelta = betterproto.message_field(3) - # Use the remote downstream IP address as the sender IP address when sending - # packets to upstream hosts. This option requires Envoy to be run with the - # *CAP_NET_ADMIN* capability on Linux. And the IPv6 stack must be enabled on - # Linux kernel. This option does not preserve the remote downstream port. If - # this option is enabled, the IP address of sent datagrams will be changed to - # the remote downstream IP address. This means that Envoy will not receive - # packets that are sent by upstream hosts because the upstream hosts will - # send the packets with the remote downstream IP address as the destination. - # All packets will be routed to the remote downstream directly if there are - # route rules on the upstream host side. There are two options to return the - # packets back to the remote downstream. The first one is to use DSR (Direct - # Server Return). The other one is to configure routing rules on the upstream - # hosts to forward all packets back to Envoy and configure iptables rules on - # the host running Envoy to forward all packets from upstream hosts to the - # Envoy process so that Envoy can forward the packets to the downstream. If - # the platform does not support this option, Envoy will raise a configuration - # error. - use_original_src_ip: bool = betterproto.bool_field(4) - # Optional configuration for UDP proxy hash policies. If hash_policies is not - # set, the hash-based load balancing algorithms will select a host randomly. - # Currently the number of hash policies is limited to 1. - hash_policies: List["UdpProxyConfigHashPolicy"] = betterproto.message_field(5) - # UDP socket configuration for upstream sockets. The default for - # :ref:`prefer_gro - # ` is true for - # upstream sockets as the assumption is datagrams will be received from a - # single source. - upstream_socket_config: "_____config_core_v3__.UdpSocketConfig" = ( - betterproto.message_field(6) - ) - # Perform per packet load balancing (upstream host selection) on each - # received data chunk. The default if not specified is false, that means each - # data chunk is forwarded to upstream host selected on first chunk receival - # for that "session" (identified by source IP/port and local IP/port). - use_per_packet_load_balancing: bool = betterproto.bool_field(7) - - -@dataclass(eq=False, repr=False) -class UdpProxyConfigHashPolicy(betterproto.Message): - """ - Specifies the UDP hash policy. The packets can be routed by hash policy. - """ - - # The source IP will be used to compute the hash used by hash-based load - # balancing algorithms. - source_ip: bool = betterproto.bool_field(1, group="policy_specifier") - # A given key will be used to compute the hash used by hash-based load - # balancing algorithms. In certain cases there is a need to direct different - # UDP streams jointly towards the selected set of endpoints. A possible use- - # case is VoIP telephony, where media (RTP) and its corresponding control - # (RTCP) belong to the same logical session, although they travel in separate - # streams. To ensure that these pair of streams are load-balanced on session - # level (instead of individual stream level), dynamically created listeners - # can use the same hash key for each stream in the session. 
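The session hashing described above can be configured by constructing the generated dataclasses directly. A minimal sketch, assuming the regenerated package keeps this module path:

from datetime import timedelta

from envoy_data_plane.envoy.extensions.filters.udp.udp_proxy.v3 import (
    UdpProxyConfig,
    UdpProxyConfigHashPolicy,
)

# Proxy UDP sessions to a single upstream cluster, hashing on the source IP so
# datagrams from one client stick to the same upstream host; sessions idle out
# after 30 seconds instead of the 1 minute default.
udp_proxy = UdpProxyConfig(
    stat_prefix="udp_stats",
    cluster="udp_backend",
    idle_timeout=timedelta(seconds=30),
    hash_policies=[UdpProxyConfigHashPolicy(source_ip=True)],
)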
- key: str = betterproto.string_field(2, group="policy_specifier") - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/formatter/__init__.py b/src/envoy_data_plane/envoy/extensions/formatter/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/formatter/metadata/__init__.py b/src/envoy_data_plane/envoy/extensions/formatter/metadata/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/formatter/metadata/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/formatter/metadata/v3/__init__.py deleted file mode 100644 index e5ae50c..0000000 --- a/src/envoy_data_plane/envoy/extensions/formatter/metadata/v3/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/formatter/metadata/v3/metadata.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Metadata(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/extensions/formatter/req_without_query/__init__.py b/src/envoy_data_plane/envoy/extensions/formatter/req_without_query/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/formatter/req_without_query/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/formatter/req_without_query/v3/__init__.py deleted file mode 100644 index b13508f..0000000 --- a/src/envoy_data_plane/envoy/extensions/formatter/req_without_query/v3/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/formatter/req_without_query/v3/req_without_query.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ReqWithoutQuery(betterproto.Message): - """Configuration for the request without query formatter.""" - - pass diff --git a/src/envoy_data_plane/envoy/extensions/health_checkers/__init__.py b/src/envoy_data_plane/envoy/extensions/health_checkers/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/health_checkers/redis/__init__.py b/src/envoy_data_plane/envoy/extensions/health_checkers/redis/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/health_checkers/redis/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/health_checkers/redis/v3/__init__.py deleted file mode 100644 index e9c4f39..0000000 --- a/src/envoy_data_plane/envoy/extensions/health_checkers/redis/v3/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/health_checkers/redis/v3/redis.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Redis(betterproto.Message): - # If set, optionally perform ``EXISTS `` instead of ``PING``. A return - # value from Redis of 0 (does not exist) is considered a passing healthcheck. - # A return value other than 0 is considered a failure. 
This allows the user - # to mark a Redis instance for maintenance by setting the specified key to - # any value and waiting for traffic to drain. - key: str = betterproto.string_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/http/__init__.py b/src/envoy_data_plane/envoy/extensions/http/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/http/header_formatters/__init__.py b/src/envoy_data_plane/envoy/extensions/http/header_formatters/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/http/header_formatters/preserve_case/__init__.py b/src/envoy_data_plane/envoy/extensions/http/header_formatters/preserve_case/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/http/header_formatters/preserve_case/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/http/header_formatters/preserve_case/v3/__init__.py deleted file mode 100644 index 247698c..0000000 --- a/src/envoy_data_plane/envoy/extensions/http/header_formatters/preserve_case/v3/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/http/header_formatters/preserve_case/v3/preserve_case.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class PreserveCaseFormatterConfig(betterproto.Message): - """ - Configuration for the preserve case header formatter. See the :ref:`header - casing ` configuration guide for more - information. - """ - - # Allows forwarding reason phrase text. This is off by default, and a - # standard reason phrase is used for a corresponding HTTP response code. - forward_reason_phrase: bool = betterproto.bool_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/http/original_ip_detection/__init__.py b/src/envoy_data_plane/envoy/extensions/http/original_ip_detection/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/http/original_ip_detection/custom_header/__init__.py b/src/envoy_data_plane/envoy/extensions/http/original_ip_detection/custom_header/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/http/original_ip_detection/custom_header/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/http/original_ip_detection/custom_header/v3/__init__.py deleted file mode 100644 index fb015ff..0000000 --- a/src/envoy_data_plane/envoy/extensions/http/original_ip_detection/custom_header/v3/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/http/original_ip_detection/custom_header/v3/custom_header.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class CustomHeaderConfig(betterproto.Message): - """ - This extension allows for the original downstream remote IP to be detected - by reading the value from a configured header name. If the value is - successfully parsed as an IP, it'll be treated as the effective downstream - remote address and seen as such by all filters. 
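For reference, a minimal sketch of how the generated Redis health-checker message above can be constructed with betterproto; the import path mirrors the package layout in this diff, and the key name is an arbitrary assumption:

    from envoy_data_plane.envoy.extensions.health_checkers.redis.v3 import Redis

    # EXISTS <key> is used instead of PING; a reply of 0 (key absent) counts as healthy.
    redis_check = Redis(key="maintenance")   # "maintenance" is a made-up key name
    wire_bytes = bytes(redis_check)          # betterproto messages serialize via bytes()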
See - :ref:`original_ip_detection_extensions ` for an overview of how extensions operate and what - happens when an extension fails to detect the remote IP. [#extension: - envoy.http.original_ip_detection.custom_header] - """ - - # The header name containing the original downstream remote address, if - # present. Note: in the case of a multi-valued header, only the first value - # is tried and the rest are ignored. - header_name: str = betterproto.string_field(1) - # If set to true, the extension could decide that the detected address should - # be treated as trusted by the HCM. If the address is considered - # :ref:`trusted`, it might be used as input to determine if the - # request is internal (among other things). - allow_extension_to_set_address_as_trusted: bool = betterproto.bool_field(2) - # If this is set, the request will be rejected when detection fails using it - # as the HTTP response status. .. note:: If this is set to < 400 or > 511, - # the default status 403 will be used instead. - reject_with_status: "_____type_v3__.HttpStatus" = betterproto.message_field(3) - - -from ......type import v3 as _____type_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/http/original_ip_detection/xff/__init__.py b/src/envoy_data_plane/envoy/extensions/http/original_ip_detection/xff/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/http/original_ip_detection/xff/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/http/original_ip_detection/xff/v3/__init__.py deleted file mode 100644 index 3a5488d..0000000 --- a/src/envoy_data_plane/envoy/extensions/http/original_ip_detection/xff/v3/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/http/original_ip_detection/xff/v3/xff.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class XffConfig(betterproto.Message): - """ - This extension allows for the original downstream remote IP to be detected - by reading the :ref:`config_http_conn_man_headers_x-forwarded-for` header. - [#extension: envoy.http.original_ip_detection.xff] - """ - - # The number of additional ingress proxy hops from the right side of the - # :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust - # when determining the origin client's IP address. The default is zero if - # this option is not specified. See the documentation for - # :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - xff_num_trusted_hops: int = betterproto.uint32_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/http/stateful_session/__init__.py b/src/envoy_data_plane/envoy/extensions/http/stateful_session/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/http/stateful_session/cookie/__init__.py b/src/envoy_data_plane/envoy/extensions/http/stateful_session/cookie/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/http/stateful_session/cookie/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/http/stateful_session/cookie/v3/__init__.py deleted file mode 100644 index 559335a..0000000 --- a/src/envoy_data_plane/envoy/extensions/http/stateful_session/cookie/v3/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
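A hedged usage sketch for the two original-IP-detection messages above (CustomHeaderConfig and XffConfig); the field names come from the generated dataclasses, while the header name and hop count are illustrative assumptions:

    from envoy_data_plane.envoy.extensions.http.original_ip_detection.custom_header.v3 import (
        CustomHeaderConfig,
    )
    from envoy_data_plane.envoy.extensions.http.original_ip_detection.xff.v3 import XffConfig

    # Read the client IP from a custom header set by a trusted front proxy
    # (the header name is an assumption for illustration).
    custom = CustomHeaderConfig(
        header_name="x-real-ip",
        allow_extension_to_set_address_as_trusted=True,
    )

    # Or trust one additional ingress hop in x-forwarded-for.
    xff = XffConfig(xff_num_trusted_hops=1)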
-# sources: envoy/extensions/http/stateful_session/cookie/v3/cookie.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class CookieBasedSessionState(betterproto.Message): - """ - This extension allows the session state to be tracked via cookies. This - extension first encodes the address of the upstream host selected by the - load balancer into a `set-cookie` response header with the :ref:`cookie - configuration `. when new requests are incoming, this - extension will try to parse the specific upstream host address by the - cookie name. If the address parsed from the cookie corresponds to a valid - upstream host, this upstream host will be selected first. See - :ref:`stateful session filter `. For example, if the cookie name is set - to `sticky-host`, envoy will prefer `1.2.3.4:80` as the upstream host when - the request contains the following header: .. code-block:: none cookie: - sticky-host="MS4yLjMuNDo4MA==" When processing the upstream response, if - `1.2.3.4:80` is indeed the final choice the extension does nothing. If - `1.2.3.4:80` is not the final choice, the new selected host will be used to - update the cookie (via the `set-cookie` response header). [#extension: - envoy.http.stateful_session.cookie] - """ - - # The cookie configuration used to track session state. - cookie: "_____type_http_v3__.Cookie" = betterproto.message_field(1) - - -from ......type.http import v3 as _____type_http_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/internal_redirect/__init__.py b/src/envoy_data_plane/envoy/extensions/internal_redirect/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/internal_redirect/allow_listed_routes/__init__.py b/src/envoy_data_plane/envoy/extensions/internal_redirect/allow_listed_routes/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/internal_redirect/allow_listed_routes/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/internal_redirect/allow_listed_routes/v3/__init__.py deleted file mode 100644 index 954449d..0000000 --- a/src/envoy_data_plane/envoy/extensions/internal_redirect/allow_listed_routes/v3/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class AllowListedRoutesConfig(betterproto.Message): - """ - An internal redirect predicate that accepts only explicitly allowed target - routes. [#extension: - envoy.internal_redirect_predicates.allow_listed_routes] - """ - - # The list of routes that's allowed as redirect target by this predicate, - # identified by the route's :ref:`name - # `. Empty route names are - # not allowed. 
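An illustrative sketch of the cookie-based stateful-session message above, reusing the `sticky-host` cookie name from its documentation; the envoy.type.http.v3 Cookie fields used here (name, ttl, path) are assumptions about that sibling module:

    from datetime import timedelta

    from envoy_data_plane.envoy.extensions.http.stateful_session.cookie.v3 import (
        CookieBasedSessionState,
    )
    from envoy_data_plane.envoy.type.http.v3 import Cookie

    # The upstream host address is encoded into this cookie via set-cookie.
    session_state = CookieBasedSessionState(
        cookie=Cookie(name="sticky-host", ttl=timedelta(hours=1), path="/")
    )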
- allowed_route_names: List[str] = betterproto.string_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/internal_redirect/previous_routes/__init__.py b/src/envoy_data_plane/envoy/extensions/internal_redirect/previous_routes/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/internal_redirect/previous_routes/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/internal_redirect/previous_routes/v3/__init__.py deleted file mode 100644 index 5939941..0000000 --- a/src/envoy_data_plane/envoy/extensions/internal_redirect/previous_routes/v3/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class PreviousRoutesConfig(betterproto.Message): - """ - An internal redirect predicate that rejects redirect targets that are - pointing to a route that has been followed by a previous redirect from the - current route. [#extension: - envoy.internal_redirect_predicates.previous_routes] - """ - - pass diff --git a/src/envoy_data_plane/envoy/extensions/internal_redirect/safe_cross_scheme/__init__.py b/src/envoy_data_plane/envoy/extensions/internal_redirect/safe_cross_scheme/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/internal_redirect/safe_cross_scheme/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/internal_redirect/safe_cross_scheme/v3/__init__.py deleted file mode 100644 index 0cc713a..0000000 --- a/src/envoy_data_plane/envoy/extensions/internal_redirect/safe_cross_scheme/v3/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class SafeCrossSchemeConfig(betterproto.Message): - """ - An internal redirect predicate that checks the scheme between the - downstream url and the redirect target url and allows a) same scheme - redirect and b) safe cross scheme redirect, which means if the downstream - scheme is HTTPS, both HTTPS and HTTP redirect targets are allowed, but if - the downstream scheme is HTTP, only HTTP redirect targets are allowed. - [#extension: envoy.internal_redirect_predicates.safe_cross_scheme] - """ - - pass diff --git a/src/envoy_data_plane/envoy/extensions/key_value/__init__.py b/src/envoy_data_plane/envoy/extensions/key_value/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/key_value/file_based/__init__.py b/src/envoy_data_plane/envoy/extensions/key_value/file_based/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/key_value/file_based/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/key_value/file_based/v3/__init__.py deleted file mode 100644 index 73b9b45..0000000 --- a/src/envoy_data_plane/envoy/extensions/key_value/file_based/v3/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/extensions/key_value/file_based/v3/config.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FileBasedKeyValueStoreConfig(betterproto.Message): - """ - [#extension: envoy.key_value.file_based] This is configuration to flush a - key value store out to disk. - """ - - # The filename to read the keys and values from, and write the keys and - # values to. - filename: str = betterproto.string_field(1) - # The interval at which the key value store should be flushed to the file. - flush_interval: timedelta = betterproto.message_field(2) diff --git a/src/envoy_data_plane/envoy/extensions/matching/__init__.py b/src/envoy_data_plane/envoy/extensions/matching/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/matching/common_inputs/__init__.py b/src/envoy_data_plane/envoy/extensions/matching/common_inputs/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/matching/common_inputs/environment_variable/__init__.py b/src/envoy_data_plane/envoy/extensions/matching/common_inputs/environment_variable/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/matching/common_inputs/environment_variable/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/matching/common_inputs/environment_variable/v3/__init__.py deleted file mode 100644 index 366fa60..0000000 --- a/src/envoy_data_plane/envoy/extensions/matching/common_inputs/environment_variable/v3/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/matching/common_inputs/environment_variable/v3/input.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Config(betterproto.Message): - """Reads an environment variable to provide an input for matching.""" - - # Name of the environment variable to read from. - name: str = betterproto.string_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/matching/input_matchers/__init__.py b/src/envoy_data_plane/envoy/extensions/matching/input_matchers/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/matching/input_matchers/consistent_hashing/__init__.py b/src/envoy_data_plane/envoy/extensions/matching/input_matchers/consistent_hashing/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/matching/input_matchers/consistent_hashing/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/matching/input_matchers/consistent_hashing/v3/__init__.py deleted file mode 100644 index 00d8b7a..0000000 --- a/src/envoy_data_plane/envoy/extensions/matching/input_matchers/consistent_hashing/v3/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
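A minimal sketch of the file-based key-value store config above; betterproto maps the protobuf Duration for flush_interval to a Python timedelta, and the filename and interval here are arbitrary placeholders:

    from datetime import timedelta

    from envoy_data_plane.envoy.extensions.key_value.file_based.v3 import (
        FileBasedKeyValueStoreConfig,
    )

    kv_store = FileBasedKeyValueStoreConfig(
        filename="/var/lib/envoy/dns_cache.pb",  # illustrative path
        flush_interval=timedelta(seconds=5),     # how often the store is flushed to disk
    )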
-# sources: envoy/extensions/matching/input_matchers/consistent_hashing/v3/consistent_hashing.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ConsistentHashing(betterproto.Message): - """ - The consistent hashing matchers computes a consistent hash from the input - and matches if the resulting hash is within the configured threshold. More - specifically, this matcher evaluates to true if hash(input, seed) % modulo - >= threshold. Note that the consistency of the match result relies on the - internal hash function (xxhash) remaining unchanged. While this is unlikely - to happen intentionally, this could cause inconsistent match results - between deployments. - """ - - # The threshold the resulting hash must be over in order for this matcher to - # evaluate to true. This value must be below the configured modulo value. - # Setting this to 0 is equivalent to this matcher always matching. - threshold: int = betterproto.uint32_field(1) - # The value to use for the modulus in the calculation. This effectively - # bounds the hash output, specifying the range of possible values. This value - # must be above the configured threshold. - modulo: int = betterproto.uint32_field(2) - # Optional seed passed through the hash function. This allows using - # additional information when computing the hash value: by changing the seed - # value, a different partition of matching and non-matching inputs will be - # created that remains consistent for that seed value. - seed: int = betterproto.uint64_field(3) diff --git a/src/envoy_data_plane/envoy/extensions/matching/input_matchers/ip/__init__.py b/src/envoy_data_plane/envoy/extensions/matching/input_matchers/ip/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/matching/input_matchers/ip/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/matching/input_matchers/ip/v3/__init__.py deleted file mode 100644 index 6095392..0000000 --- a/src/envoy_data_plane/envoy/extensions/matching/input_matchers/ip/v3/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/matching/input_matchers/ip/v3/ip.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Ip(betterproto.Message): - """ - This input matcher matches IPv4 or IPv6 addresses against a list of CIDR - ranges. It returns true if and only if the input IP belongs to at least one - of these CIDR ranges. Internally, it uses a Level-Compressed trie, as - described in the paper `IP-address lookup using LC-tries - `_ by S. Nilsson and G. Karlsson. For "big" lists of IPs, this - matcher is more efficient than multiple single IP matcher, that would have - a linear cost. - """ - - # Match if the IP belongs to any of these CIDR ranges. - cidr_ranges: List["_____config_core_v3__.CidrRange"] = betterproto.message_field(1) - # The human readable prefix to use when emitting statistics for the IP input - # matcher. Names in the table below are concatenated to this prefix. .. 
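A sketch of the consistent-hashing matcher message above, configured so that roughly half of all inputs match; per its documentation it evaluates to true when hash(input, seed) % modulo >= threshold, so this selects residues 50..99 out of 0..99 (the values are illustrative):

    from envoy_data_plane.envoy.extensions.matching.input_matchers.consistent_hashing.v3 import (
        ConsistentHashing,
    )

    # threshold must stay below modulo; threshold=0 would match everything.
    matcher = ConsistentHashing(threshold=50, modulo=100, seed=0)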
csv- - # table:: :header: Name, Type, Description :widths: 1, 1, 2 - # ip_parsing_failed, Counter, Total number of IP addresses the matcher was - # unable to parse - stat_prefix: str = betterproto.string_field(2) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/network/__init__.py b/src/envoy_data_plane/envoy/extensions/network/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/network/dns_resolver/__init__.py b/src/envoy_data_plane/envoy/extensions/network/dns_resolver/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/network/dns_resolver/apple/__init__.py b/src/envoy_data_plane/envoy/extensions/network/dns_resolver/apple/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/network/dns_resolver/apple/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/network/dns_resolver/apple/v3/__init__.py deleted file mode 100644 index dfb50d4..0000000 --- a/src/envoy_data_plane/envoy/extensions/network/dns_resolver/apple/v3/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/network/dns_resolver/apple/v3/apple_dns_resolver.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class AppleDnsResolverConfig(betterproto.Message): - """Configuration for apple DNS resolver.""" - - pass diff --git a/src/envoy_data_plane/envoy/extensions/network/dns_resolver/cares/__init__.py b/src/envoy_data_plane/envoy/extensions/network/dns_resolver/cares/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/network/dns_resolver/cares/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/network/dns_resolver/cares/v3/__init__.py deleted file mode 100644 index 8b3643d..0000000 --- a/src/envoy_data_plane/envoy/extensions/network/dns_resolver/cares/v3/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/network/dns_resolver/cares/v3/cares_dns_resolver.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class CaresDnsResolverConfig(betterproto.Message): - """Configuration for c-ares DNS resolver.""" - - # A list of dns resolver addresses. :ref:`use_resolvers_as_fallback` below dictates if the DNS client should override - # system defaults or only use the provided resolvers if the system defaults - # are not available, i.e., as a fallback. - resolvers: List["_____config_core_v3__.Address"] = betterproto.message_field(1) - # If true use the resolvers listed in the :ref:`resolvers` - # field only if c-ares is unable to obtain a nameserver from the system - # (e.g., /etc/resolv.conf). Otherwise, the resolvers listed in the resolvers - # list will override the default system resolvers. Defaults to false. - use_resolvers_as_fallback: bool = betterproto.bool_field(3) - # The resolver will query available network interfaces and determine if there - # are no available interfaces for a given IP family. It will then filter - # these addresses from the results it presents. 
e.g., if there are no - # available IPv4 network interfaces, the resolver will not provide IPv4 - # addresses. - filter_unroutable_families: bool = betterproto.bool_field(4) - # Configuration of DNS resolver option flags which control the behavior of - # the DNS resolver. - dns_resolver_options: "_____config_core_v3__.DnsResolverOptions" = ( - betterproto.message_field(2) - ) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/network/socket_interface/__init__.py b/src/envoy_data_plane/envoy/extensions/network/socket_interface/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/network/socket_interface/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/network/socket_interface/v3/__init__.py deleted file mode 100644 index e4d4fb2..0000000 --- a/src/envoy_data_plane/envoy/extensions/network/socket_interface/v3/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/network/socket_interface/v3/default_socket_interface.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class DefaultSocketInterface(betterproto.Message): - """ - Configuration for default socket interface that relies on OS dependent - syscall to create sockets. - """ - - pass diff --git a/src/envoy_data_plane/envoy/extensions/quic/__init__.py b/src/envoy_data_plane/envoy/extensions/quic/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/quic/crypto_stream/__init__.py b/src/envoy_data_plane/envoy/extensions/quic/crypto_stream/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/quic/crypto_stream/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/quic/crypto_stream/v3/__init__.py deleted file mode 100644 index 86e0920..0000000 --- a/src/envoy_data_plane/envoy/extensions/quic/crypto_stream/v3/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/quic/crypto_stream/v3/crypto_stream.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class CryptoServerStreamConfig(betterproto.Message): - """ - Configuration for the default QUIC server crypto stream provided by QUICHE. - """ - - pass diff --git a/src/envoy_data_plane/envoy/extensions/quic/proof_source/__init__.py b/src/envoy_data_plane/envoy/extensions/quic/proof_source/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/quic/proof_source/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/quic/proof_source/v3/__init__.py deleted file mode 100644 index e7d71e2..0000000 --- a/src/envoy_data_plane/envoy/extensions/quic/proof_source/v3/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
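A hedged sketch of the c-ares resolver config above; the envoy.config.core.v3 Address and SocketAddress field names used here are assumptions about that module, and the resolver IP is a placeholder:

    from envoy_data_plane.envoy.config.core.v3 import Address, SocketAddress
    from envoy_data_plane.envoy.extensions.network.dns_resolver.cares.v3 import (
        CaresDnsResolverConfig,
    )

    resolver = CaresDnsResolverConfig(
        resolvers=[
            Address(socket_address=SocketAddress(address="8.8.8.8", port_value=53)),
        ],
        use_resolvers_as_fallback=True,   # only used if the system (e.g. /etc/resolv.conf) yields nothing
        filter_unroutable_families=True,  # drop results for IP families with no usable interface
    )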
-# sources: envoy/extensions/quic/proof_source/v3/proof_source.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ProofSourceConfig(betterproto.Message): - """Configuration for the default QUIC proof source.""" - - pass diff --git a/src/envoy_data_plane/envoy/extensions/rate_limit_descriptors/__init__.py b/src/envoy_data_plane/envoy/extensions/rate_limit_descriptors/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/rate_limit_descriptors/expr/__init__.py b/src/envoy_data_plane/envoy/extensions/rate_limit_descriptors/expr/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/rate_limit_descriptors/expr/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/rate_limit_descriptors/expr/v3/__init__.py deleted file mode 100644 index 119176b..0000000 --- a/src/envoy_data_plane/envoy/extensions/rate_limit_descriptors/expr/v3/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/rate_limit_descriptors/expr/v3/expr.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Descriptor(betterproto.Message): - """ - The following descriptor entry is appended with a value computed from a - symbolic Common Expression Language expression. See :ref:`attributes - ` for the set of available attributes. .. code- - block:: cpp ("", "") - """ - - # The key to use in the descriptor entry. - descriptor_key: str = betterproto.string_field(1) - # If set to true, Envoy skips the descriptor if the expression evaluates to - # an error. By default, the rate limit is not applied when an expression - # produces an error. - skip_if_error: bool = betterproto.bool_field(2) - # Expression in a text form, e.g. "connection.requested_server_name". - text: str = betterproto.string_field(3, group="expr_specifier") - # Parsed expression in AST form. - parsed: "_____google_api_expr_v1_alpha1__.Expr" = betterproto.message_field( - 4, group="expr_specifier" - ) - - -from ......google.api.expr import v1alpha1 as _____google_api_expr_v1_alpha1__ diff --git a/src/envoy_data_plane/envoy/extensions/rbac/__init__.py b/src/envoy_data_plane/envoy/extensions/rbac/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/rbac/matchers/__init__.py b/src/envoy_data_plane/envoy/extensions/rbac/matchers/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/rbac/matchers/upstream_ip_port/__init__.py b/src/envoy_data_plane/envoy/extensions/rbac/matchers/upstream_ip_port/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/rbac/matchers/upstream_ip_port/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/rbac/matchers/upstream_ip_port/v3/__init__.py deleted file mode 100644 index 97c1bba..0000000 --- a/src/envoy_data_plane/envoy/extensions/rbac/matchers/upstream_ip_port/v3/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
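An illustrative sketch of the rate-limit descriptor message above, using the `connection.requested_server_name` expression from its own documentation; the descriptor key is an assumption, and only one of the text/parsed oneof members should be set:

    from envoy_data_plane.envoy.extensions.rate_limit_descriptors.expr.v3 import Descriptor

    # The resulting descriptor entry pairs descriptor_key with the expression result.
    sni_descriptor = Descriptor(
        descriptor_key="requested_server_name",
        skip_if_error=True,
        text="connection.requested_server_name",  # text and parsed are a oneof; set only one
    )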
-# sources: envoy/extensions/rbac/matchers/upstream_ip_port/v3/upstream_ip_port_matcher.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class UpstreamIpPortMatcher(betterproto.Message): - """ - This is configuration for matching upstream ip and port. Note that although - both fields are optional, at least one of IP or port must be supplied. If - only one is supplied the other is a wildcard match. This matcher requires a - filter in the chain to have saved the upstream address in the filter state - before the matcher is executed by RBAC filter. The state should be saved - with key `envoy.stream.upstream_address` (See - :repo:`upstream_address.h`). - Also, See :repo:`proxy_filter.cc< - source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc>` for - an example of a filter which populates the FilterState. - """ - - # A CIDR block that will be used to match the upstream IP. Both Ipv4 and Ipv6 - # ranges can be matched. - upstream_ip: "_____config_core_v3__.CidrRange" = betterproto.message_field(1) - # A port range that will be used to match the upstream port. - upstream_port_range: "_____type_v3__.Int64Range" = betterproto.message_field(2) - - -from ......config.core import v3 as _____config_core_v3__ -from ......type import v3 as _____type_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/request_id/__init__.py b/src/envoy_data_plane/envoy/extensions/request_id/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/request_id/uuid/__init__.py b/src/envoy_data_plane/envoy/extensions/request_id/uuid/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/request_id/uuid/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/request_id/uuid/v3/__init__.py deleted file mode 100644 index 12c1e56..0000000 --- a/src/envoy_data_plane/envoy/extensions/request_id/uuid/v3/__init__.py +++ /dev/null @@ -1,45 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/request_id/uuid/v3/uuid.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class UuidRequestIdConfig(betterproto.Message): - """ - Configuration for the default UUID request ID extension which has the - following behavior: 1. Request ID is propagated using the :ref:`x-request- - id ` header. 2. Request ID is - a universally unique identifier `(UUID4) `_. 3. Tracing decision - (sampled, forced, etc) is set in 14th nibble of the UUID. By default this - will overwrite existing UUIDs received in the *x-request-id* header if - the trace sampling decision is changed. The 14th nibble of the UUID4 has - been chosen because it is fixed to '4' by the standard. Thus, '4' - indicates a default UUID and no trace status. This nibble is swapped to: - a. '9': Sampled. b. 'a': Force traced due to server-side override. - c. 'b': Force traced due to client-side request ID joining. See the - :ref:`x-request-id ` - documentation for more information. - """ - - # Whether the implementation alters the UUID to contain the trace sampling - # decision as per the `UuidRequestIdConfig` message documentation. This - # defaults to true. If disabled no modification to the UUID will be - # performed. 
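A hedged sketch of the RBAC upstream IP/port matcher above; the CidrRange and Int64Range field names are assumptions about envoy.config.core.v3 and envoy.type.v3, and the range values are placeholders:

    from envoy_data_plane.envoy.config.core.v3 import CidrRange
    from envoy_data_plane.envoy.extensions.rbac.matchers.upstream_ip_port.v3 import (
        UpstreamIpPortMatcher,
    )
    from envoy_data_plane.envoy.type.v3 import Int64Range

    # Setting only one of the two fields would make the other a wildcard match.
    matcher = UpstreamIpPortMatcher(
        upstream_ip=CidrRange(address_prefix="10.0.0.0", prefix_len=8),
        upstream_port_range=Int64Range(start=8000, end=8080),
    )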
It is important to note that if disabled, stable sampling of - # traces, access logs, etc. will no longer work and only random sampling will - # be possible. - pack_trace_reason: Optional[bool] = betterproto.message_field( - 1, wraps=betterproto.TYPE_BOOL - ) - # Set whether to use :ref:`x-request-id` for sampling or not. This defaults to true. See the - # :ref:`context propagation ` - # overview for more information. - use_request_id_for_trace_sampling: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) diff --git a/src/envoy_data_plane/envoy/extensions/resource_monitors/__init__.py b/src/envoy_data_plane/envoy/extensions/resource_monitors/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/resource_monitors/fixed_heap/__init__.py b/src/envoy_data_plane/envoy/extensions/resource_monitors/fixed_heap/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/resource_monitors/fixed_heap/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/resource_monitors/fixed_heap/v3/__init__.py deleted file mode 100644 index e620daa..0000000 --- a/src/envoy_data_plane/envoy/extensions/resource_monitors/fixed_heap/v3/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/resource_monitors/fixed_heap/v3/fixed_heap.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class FixedHeapConfig(betterproto.Message): - """ - The fixed heap resource monitor reports the Envoy process memory pressure, - computed as a fraction of currently reserved heap memory divided by a - statically configured maximum specified in the FixedHeapConfig. - """ - - max_heap_size_bytes: int = betterproto.uint64_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/resource_monitors/injected_resource/__init__.py b/src/envoy_data_plane/envoy/extensions/resource_monitors/injected_resource/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/resource_monitors/injected_resource/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/resource_monitors/injected_resource/v3/__init__.py deleted file mode 100644 index 83b9fd0..0000000 --- a/src/envoy_data_plane/envoy/extensions/resource_monitors/injected_resource/v3/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/resource_monitors/injected_resource/v3/injected_resource.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class InjectedResourceConfig(betterproto.Message): - """ - The injected resource monitor allows injecting a synthetic resource - pressure into Envoy via a text file, which must contain a floating-point - number in the range [0..1] representing the resource pressure and be - updated atomically by a symbolic link swap. This is intended primarily for - integration tests to force Envoy into an overloaded state. 
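A minimal sketch of the UUID request-ID config above; both fields are BoolValue wrappers, which betterproto exposes as Optional[bool], so passing plain booleans is enough (the chosen values are illustrative):

    from envoy_data_plane.envoy.extensions.request_id.uuid.v3 import UuidRequestIdConfig

    request_id_config = UuidRequestIdConfig(
        pack_trace_reason=False,                # leave UUIDs untouched; stable sampling no longer works
        use_request_id_for_trace_sampling=True,
    )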
- """ - - filename: str = betterproto.string_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/retry/__init__.py b/src/envoy_data_plane/envoy/extensions/retry/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/retry/host/__init__.py b/src/envoy_data_plane/envoy/extensions/retry/host/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/retry/host/omit_canary_hosts/__init__.py b/src/envoy_data_plane/envoy/extensions/retry/host/omit_canary_hosts/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/retry/host/omit_canary_hosts/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/retry/host/omit_canary_hosts/v3/__init__.py deleted file mode 100644 index b06aabe..0000000 --- a/src/envoy_data_plane/envoy/extensions/retry/host/omit_canary_hosts/v3/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/retry/host/omit_canary_hosts/v3/omit_canary_hosts.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class OmitCanaryHostsPredicate(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/extensions/retry/host/omit_host_metadata/__init__.py b/src/envoy_data_plane/envoy/extensions/retry/host/omit_host_metadata/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/retry/host/omit_host_metadata/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/retry/host/omit_host_metadata/v3/__init__.py deleted file mode 100644 index 5804da5..0000000 --- a/src/envoy_data_plane/envoy/extensions/retry/host/omit_host_metadata/v3/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class OmitHostMetadataConfig(betterproto.Message): - """ - A retry host predicate that can be used to reject a host based on - predefined metadata match criteria. [#extension: - envoy.retry_host_predicates.omit_host_metadata] - """ - - # Retry host predicate metadata match criteria. The hosts in the upstream - # cluster with matching metadata will be omitted while attempting a retry of - # a failed request. The metadata should be specified under the *envoy.lb* - # key. - metadata_match: "_____config_core_v3__.Metadata" = betterproto.message_field(1) - - -from ......config.core import v3 as _____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/retry/host/previous_hosts/__init__.py b/src/envoy_data_plane/envoy/extensions/retry/host/previous_hosts/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/retry/host/previous_hosts/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/retry/host/previous_hosts/v3/__init__.py deleted file mode 100644 index ed2a5a5..0000000 --- a/src/envoy_data_plane/envoy/extensions/retry/host/previous_hosts/v3/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/extensions/retry/host/previous_hosts/v3/previous_hosts.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class PreviousHostsPredicate(betterproto.Message): - pass diff --git a/src/envoy_data_plane/envoy/extensions/retry/priority/__init__.py b/src/envoy_data_plane/envoy/extensions/retry/priority/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/retry/priority/previous_priorities/__init__.py b/src/envoy_data_plane/envoy/extensions/retry/priority/previous_priorities/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/retry/priority/previous_priorities/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/retry/priority/previous_priorities/v3/__init__.py deleted file mode 100644 index ac76f90..0000000 --- a/src/envoy_data_plane/envoy/extensions/retry/priority/previous_priorities/v3/__init__.py +++ /dev/null @@ -1,46 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class PreviousPrioritiesConfig(betterproto.Message): - """ - A retry host selector that attempts to spread retries between priorities, - even if certain priorities would not normally be attempted due to higher - priorities being available. As priorities get excluded, load will be - distributed amongst the remaining healthy priorities based on the relative - health of the priorities, matching how load is distributed during regular - host selection. For example, given priority healths of {100, 50, 50}, the - original load will be {100, 0, 0} (since P0 has capacity to handle 100% of - the traffic). If P0 is excluded, the load changes to {0, 50, 50}, because - P1 is only able to handle 50% of the traffic, causing the remaining to - spill over to P2. Each priority attempted will be excluded until there are - no healthy priorities left, at which point the list of attempted priorities - will be reset, essentially starting from the beginning. For example, given - three priorities P0, P1, P2 with healthy % of 100, 0 and 50 respectively, - the following sequence of priorities would be selected (assuming - update_frequency = 1): Attempt 1: P0 (P0 is 100% healthy) Attempt 2: P2 (P0 - already attempted, P2 only healthy priority) Attempt 3: P0 (no healthy - priorities, reset) Attempt 4: P2 In the case of all upstream hosts being - unhealthy, no adjustments will be made to the original priority load, so - behavior should be identical to not using this plugin. Using this - PriorityFilter requires rebuilding the priority load, which runs in O(# of - priorities), which might incur significant overhead for clusters with many - priorities. [#extension: envoy.retry_priorities.previous_priorities] - """ - - # How often the priority load should be updated based on previously attempted - # priorities. Useful to allow each priorities to receive more than one - # request before being excluded or to reduce the number of times that the - # priority load has to be recomputed. 
For example, by setting this to 2, then - # the first two attempts (initial attempt and first retry) will use the - # unmodified priority load. The third and fourth attempt will use priority - # load which excludes the priorities routed to with the first two attempts, - # and the fifth and sixth attempt will use the priority load excluding the - # priorities used for the first four attempts. Must be greater than 0. - update_frequency: int = betterproto.int32_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/stat_sinks/__init__.py b/src/envoy_data_plane/envoy/extensions/stat_sinks/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/stat_sinks/graphite_statsd/__init__.py b/src/envoy_data_plane/envoy/extensions/stat_sinks/graphite_statsd/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/stat_sinks/graphite_statsd/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/stat_sinks/graphite_statsd/v3/__init__.py deleted file mode 100644 index 4cb0473..0000000 --- a/src/envoy_data_plane/envoy/extensions/stat_sinks/graphite_statsd/v3/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/stat_sinks/graphite_statsd/v3/graphite_statsd.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class GraphiteStatsdSink(betterproto.Message): - # The UDP address of a running Graphite-compliant listener. If specified, - # statistics will be flushed to this address. - address: "____config_core_v3__.Address" = betterproto.message_field( - 1, group="statsd_specifier" - ) - # Optional custom metric name prefix. See :ref:`StatsdSink's prefix field - # ` for more details. - prefix: str = betterproto.string_field(3) - # Optional max datagram size to use when sending UDP messages. By default - # Envoy will emit one metric per datagram. By specifying a max-size larger - # than a single metric, Envoy will emit multiple, new-line separated metrics. - # The max datagram size should not exceed your network's MTU. Note that this - # value may not be respected if smaller than a single metric. - max_bytes_per_datagram: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT64 - ) - - -from .....config.core import v3 as ____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/stat_sinks/wasm/__init__.py b/src/envoy_data_plane/envoy/extensions/stat_sinks/wasm/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/stat_sinks/wasm/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/stat_sinks/wasm/v3/__init__.py deleted file mode 100644 index 5f56ab4..0000000 --- a/src/envoy_data_plane/envoy/extensions/stat_sinks/wasm/v3/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/stat_sinks/wasm/v3/wasm.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Wasm(betterproto.Message): - # General Plugin configuration. 
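A sketch of the previous-priorities retry plugin config above, using the update_frequency of 2 described in its own documentation:

    from envoy_data_plane.envoy.extensions.retry.priority.previous_priorities.v3 import (
        PreviousPrioritiesConfig,
    )

    # Recompute the priority load every two attempts (initial attempt plus first retry).
    retry_priority = PreviousPrioritiesConfig(update_frequency=2)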
- config: "___wasm_v3__.PluginConfig" = betterproto.message_field(1) - - -from ....wasm import v3 as ___wasm_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/alts/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/alts/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/alts/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/alts/v3/__init__.py deleted file mode 100644 index 282f2c2..0000000 --- a/src/envoy_data_plane/envoy/extensions/transport_sockets/alts/v3/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/transport_sockets/alts/v3/alts.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Alts(betterproto.Message): - """ - Configuration for ALTS transport socket. This provides Google's ALTS - protocol to Envoy. Store the peer identity in dynamic metadata, namespace - is "envoy.transport_socket.peer_information", key is "peer_identity". - https://cloud.google.com/security/encryption-in-transit/application-layer- - transport-security/ - """ - - # The location of a handshaker service, this is usually 169.254.169.254:8080 - # on GCE. - handshaker_service: str = betterproto.string_field(1) - # The acceptable service accounts from peer, peers not in the list will be - # rejected in the handshake validation step. If empty, no validation will be - # performed. - peer_service_accounts: List[str] = betterproto.string_field(2) diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/proxy_protocol/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/proxy_protocol/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/proxy_protocol/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/proxy_protocol/v3/__init__.py deleted file mode 100644 index b03ce93..0000000 --- a/src/envoy_data_plane/envoy/extensions/transport_sockets/proxy_protocol/v3/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class ProxyProtocolUpstreamTransport(betterproto.Message): - """Configuration for PROXY protocol socket""" - - # The PROXY protocol settings - config: "____config_core_v3__.ProxyProtocolConfig" = betterproto.message_field(1) - # The underlying transport socket being wrapped. 
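An illustrative sketch of the ALTS transport-socket message above, using the GCE handshaker address mentioned in its documentation; the service-account entry is a hypothetical value:

    from envoy_data_plane.envoy.extensions.transport_sockets.alts.v3 import Alts

    alts = Alts(
        handshaker_service="169.254.169.254:8080",  # the usual handshaker location on GCE
        peer_service_accounts=[
            "peer-sa@my-project.iam.gserviceaccount.com",  # hypothetical; empty list disables validation
        ],
    )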
- transport_socket: "____config_core_v3__.TransportSocket" = ( - betterproto.message_field(2) - ) - - -from .....config.core import v3 as ____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/quic/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/quic/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/quic/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/quic/v3/__init__.py deleted file mode 100644 index 3b6d220..0000000 --- a/src/envoy_data_plane/envoy/extensions/transport_sockets/quic/v3/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/transport_sockets/quic/v3/quic_transport.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class QuicDownstreamTransport(betterproto.Message): - """ - Configuration for Downstream QUIC transport socket. This provides Google's - implementation of Google QUIC and IETF QUIC to Envoy. - """ - - downstream_tls_context: "__tls_v3__.DownstreamTlsContext" = ( - betterproto.message_field(1) - ) - - -@dataclass(eq=False, repr=False) -class QuicUpstreamTransport(betterproto.Message): - """ - Configuration for Upstream QUIC transport socket. This provides Google's - implementation of Google QUIC and IETF QUIC to Envoy. - """ - - upstream_tls_context: "__tls_v3__.UpstreamTlsContext" = betterproto.message_field(1) - - -from ...tls import v3 as __tls_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/raw_buffer/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/raw_buffer/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/raw_buffer/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/raw_buffer/v3/__init__.py deleted file mode 100644 index 04a258a..0000000 --- a/src/envoy_data_plane/envoy/extensions/transport_sockets/raw_buffer/v3/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RawBuffer(betterproto.Message): - """Configuration for raw buffer transport socket.""" - - pass diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/s2a/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/s2a/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/s2a/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/s2a/v3/__init__.py deleted file mode 100644 index ab3d825..0000000 --- a/src/envoy_data_plane/envoy/extensions/transport_sockets/s2a/v3/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/extensions/transport_sockets/s2a/v3/s2a.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class S2AConfiguration(betterproto.Message): - """ - [#not-implemented-hide:] Configuration for S2A transport socket. This - allows Envoy clients to configure how to offload mTLS handshakes to the S2A - service. https://github.com/google/s2a-core#readme - """ - - # The address of the S2A. This can be an IP address or a hostname, followed - # by a port number. - s2_a_address: str = betterproto.string_field(1) diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/starttls/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/starttls/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/starttls/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/starttls/v3/__init__.py deleted file mode 100644 index d06f6bd..0000000 --- a/src/envoy_data_plane/envoy/extensions/transport_sockets/starttls/v3/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/transport_sockets/starttls/v3/starttls.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class StartTlsConfig(betterproto.Message): - """ - Configuration for a downstream StartTls transport socket. StartTls - transport socket wraps two sockets: * raw_buffer socket which is used at - the beginning of the session * TLS socket used when a protocol negotiates a - switch to encrypted traffic. - """ - - # (optional) Configuration for clear-text socket used at the beginning of the - # session. - cleartext_socket_config: "__raw_buffer_v3__.RawBuffer" = betterproto.message_field( - 1 - ) - # Configuration for a downstream TLS socket. - tls_socket_config: "__tls_v3__.DownstreamTlsContext" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class UpstreamStartTlsConfig(betterproto.Message): - """ - Configuration for an upstream StartTls transport socket. StartTls transport - socket wraps two sockets: * raw_buffer socket which is used at the - beginning of the session * TLS socket used when a protocol negotiates a - switch to encrypted traffic. - """ - - # (optional) Configuration for clear-text socket used at the beginning of the - # session. - cleartext_socket_config: "__raw_buffer_v3__.RawBuffer" = betterproto.message_field( - 1 - ) - # Configuration for an upstream TLS socket. - tls_socket_config: "__tls_v3__.UpstreamTlsContext" = betterproto.message_field(2) - - -from ...raw_buffer import v3 as __raw_buffer_v3__ -from ...tls import v3 as __tls_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/tap/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/tap/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/tap/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/tap/v3/__init__.py deleted file mode 100644 index 5750183..0000000 --- a/src/envoy_data_plane/envoy/extensions/transport_sockets/tap/v3/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
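A hedged sketch of the downstream StartTls wrapper above; an empty DownstreamTlsContext stands in for a real TLS configuration, which would normally carry certificates:

    from envoy_data_plane.envoy.extensions.transport_sockets.raw_buffer.v3 import RawBuffer
    from envoy_data_plane.envoy.extensions.transport_sockets.starttls.v3 import StartTlsConfig
    from envoy_data_plane.envoy.extensions.transport_sockets.tls.v3 import DownstreamTlsContext

    start_tls = StartTlsConfig(
        cleartext_socket_config=RawBuffer(),       # clear text until the protocol negotiates the switch
        tls_socket_config=DownstreamTlsContext(),  # placeholder; a real config would set TLS certificates
    )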
-# sources: envoy/extensions/transport_sockets/tap/v3/tap.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Tap(betterproto.Message): - """ - Configuration for tap transport socket. This wraps another transport - socket, providing the ability to interpose and record in plain text any - traffic that is surfaced to Envoy. - """ - - # Common configuration for the tap transport socket. - common_config: "___common_tap_v3__.CommonExtensionConfig" = ( - betterproto.message_field(1) - ) - # The underlying transport socket being wrapped. - transport_socket: "____config_core_v3__.TransportSocket" = ( - betterproto.message_field(2) - ) - - -from .....config.core import v3 as ____config_core_v3__ -from ....common.tap import v3 as ___common_tap_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/tcp_stats/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/tcp_stats/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/tcp_stats/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/tcp_stats/v3/__init__.py deleted file mode 100644 index bf4fcab..0000000 --- a/src/envoy_data_plane/envoy/extensions/transport_sockets/tcp_stats/v3/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/transport_sockets/tcp_stats/v3/tcp_stats.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Config(betterproto.Message): - """ - Configuration for the TCP Stats transport socket wrapper, which wraps - another transport socket for all communication, but emits stats about the - underlying TCP connection. The stats are documented :ref:`here - ` for listeners and :ref:`here - ` for clusters. This transport - socket is currently only supported on Linux. - """ - - # The underlying transport socket being wrapped. - transport_socket: "____config_core_v3__.TransportSocket" = ( - betterproto.message_field(1) - ) - # Period to update stats while the connection is open. If unset, updates only - # happen when the connection is closed. Stats are always updated one final - # time when the connection is closed. - update_period: timedelta = betterproto.message_field(2) - - -from .....config.core import v3 as ____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/tls/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/tls/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/transport_sockets/tls/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/transport_sockets/tls/v3/__init__.py deleted file mode 100644 index 1a09abd..0000000 --- a/src/envoy_data_plane/envoy/extensions/transport_sockets/tls/v3/__init__.py +++ /dev/null @@ -1,753 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/extensions/transport_sockets/tls/v3/cert.proto, envoy/extensions/transport_sockets/tls/v3/common.proto, envoy/extensions/transport_sockets/tls/v3/secret.proto, envoy/extensions/transport_sockets/tls/v3/tls.proto, envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class TlsParametersTlsProtocol(betterproto.Enum): - TLS_AUTO = 0 - TLSv1_0 = 1 - TLSv1_1 = 2 - TLSv1_2 = 3 - TLSv1_3 = 4 - - -class SubjectAltNameMatcherSanType(betterproto.Enum): - SAN_TYPE_UNSPECIFIED = 0 - EMAIL = 1 - DNS = 2 - URI = 3 - IP_ADDRESS = 4 - - -class CertificateValidationContextTrustChainVerification(betterproto.Enum): - VERIFY_TRUST_CHAIN = 0 - ACCEPT_UNTRUSTED = 1 - - -class DownstreamTlsContextOcspStaplePolicy(betterproto.Enum): - LENIENT_STAPLING = 0 - STRICT_STAPLING = 1 - MUST_STAPLE = 2 - - -@dataclass(eq=False, repr=False) -class TlsParameters(betterproto.Message): - # Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and - # ``TLSv1_0`` for servers. - tls_minimum_protocol_version: "TlsParametersTlsProtocol" = betterproto.enum_field(1) - # Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and - # ``TLSv1_3`` for servers. - tls_maximum_protocol_version: "TlsParametersTlsProtocol" = betterproto.enum_field(2) - # If specified, the TLS listener will only support the specified `cipher list - # `_ when negotiating TLS 1.0-1.2 - # (this setting has no effect when negotiating TLS 1.3). If not specified, a - # default list will be used. Defaults are different for server (downstream) - # and client (upstream) TLS configurations. In non-FIPS builds, the default - # server cipher list is: .. code-block:: none [ECDHE-ECDSA-AES128-GCM- - # SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] [ECDHE-RSA-AES128-GCM-SHA256|ECDHE- - # RSA-CHACHA20-POLY1305] ECDHE-ECDSA-AES128-SHA ECDHE-RSA-AES128-SHA - # AES128-GCM-SHA256 AES128-SHA ECDHE-ECDSA-AES256-GCM-SHA384 ECDHE-RSA- - # AES256-GCM-SHA384 ECDHE-ECDSA-AES256-SHA ECDHE-RSA-AES256-SHA - # AES256-GCM-SHA384 AES256-SHA In builds using :ref:`BoringSSL FIPS - # `, the default server cipher list is: .. code- - # block:: none ECDHE-ECDSA-AES128-GCM-SHA256 ECDHE-RSA-AES128-GCM-SHA256 - # ECDHE-ECDSA-AES128-SHA ECDHE-RSA-AES128-SHA AES128-GCM-SHA256 - # AES128-SHA ECDHE-ECDSA-AES256-GCM-SHA384 ECDHE-RSA-AES256-GCM-SHA384 - # ECDHE-ECDSA-AES256-SHA ECDHE-RSA-AES256-SHA AES256-GCM-SHA384 - # AES256-SHA In non-FIPS builds, the default client cipher list is: .. code- - # block:: none [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA- - # CHACHA20-POLY1305] [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA- - # CHACHA20-POLY1305] ECDHE-ECDSA-AES256-GCM-SHA384 ECDHE-RSA-AES256-GCM- - # SHA384 In builds using :ref:`BoringSSL FIPS `, the - # default client cipher list is: .. code-block:: none ECDHE-ECDSA- - # AES128-GCM-SHA256 ECDHE-RSA-AES128-GCM-SHA256 ECDHE-ECDSA-AES256-GCM- - # SHA384 ECDHE-RSA-AES256-GCM-SHA384 - cipher_suites: List[str] = betterproto.string_field(3) - # If specified, the TLS connection will only support the specified ECDH - # curves. If not specified, the default curves will be used. In non-FIPS - # builds, the default curves are: .. code-block:: none X25519 P-256 In - # builds using :ref:`BoringSSL FIPS `, the default - # curve is: .. 
code-block:: none P-256 - ecdh_curves: List[str] = betterproto.string_field(4) - - -@dataclass(eq=False, repr=False) -class PrivateKeyProvider(betterproto.Message): - """ - BoringSSL private key method configuration. The private key methods are - used for external (potentially asynchronous) signing and decryption - operations. Some use cases for private key methods would be TPM support and - TLS acceleration. - """ - - # Private key method provider name. The name must match a supported private - # key method provider type. - provider_name: str = betterproto.string_field(1) - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field( - 3, group="config_type" - ) - - -@dataclass(eq=False, repr=False) -class TlsCertificate(betterproto.Message): - """[#next-free-field: 9]""" - - # The TLS certificate chain. If *certificate_chain* is a filesystem path, a - # watch will be added to the parent directory for any file moves to support - # rotation. This currently only applies to dynamic secrets, when the - # *TlsCertificate* is delivered via SDS. - certificate_chain: "____config_core_v3__.DataSource" = betterproto.message_field(1) - # The TLS private key. If *private_key* is a filesystem path, a watch will be - # added to the parent directory for any file moves to support rotation. This - # currently only applies to dynamic secrets, when the *TlsCertificate* is - # delivered via SDS. - private_key: "____config_core_v3__.DataSource" = betterproto.message_field(2) - # `Pkcs12` data containing TLS certificate, chain, and private key. If - # *pkcs12* is a filesystem path, the file will be read, but no watch will be - # added to the parent directory, since *pkcs12* isn't used by SDS. This field - # is mutually exclusive with *certificate_chain*, *private_key* and - # *private_key_provider*. This can't be marked as ``oneof`` due to API - # compatibility reasons. Setting both :ref:`private_key `, - # :ref:`certificate_chain `, or :ref:`private_key_provider ` and :ref:`pkcs12 ` fields will result in an error. Use - # :ref:`password ` to specify the password to unprotect the `PKCS12` - # data, if necessary. - pkcs12: "____config_core_v3__.DataSource" = betterproto.message_field(8) - # If specified, updates of file-based *certificate_chain* and *private_key* - # sources will be triggered by this watch. The certificate/key pair will be - # read together and validated for atomic read consistency (i.e. no - # intervening modification occurred between cert/key read, verified by file - # hash comparisons). This allows explicit control over the path watched, by - # default the parent directories of the filesystem paths in - # *certificate_chain* and *private_key* are watched if this field is not - # specified. This only applies when a *TlsCertificate* is delivered by SDS - # with references to filesystem paths. See the :ref:`SDS key rotation - # ` documentation for further details. - watched_directory: "____config_core_v3__.WatchedDirectory" = ( - betterproto.message_field(7) - ) - # BoringSSL private key method provider. This is an alternative to - # :ref:`private_key ` field. This can't be marked as ``oneof`` due to - # API compatibility reasons. Setting both :ref:`private_key ` and - # :ref:`private_key_provider ` fields will result in an - # error. - private_key_provider: "PrivateKeyProvider" = betterproto.message_field(6) - # The password to decrypt the TLS private key. If this field is not set, it - # is assumed that the TLS private key is not password encrypted. 
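Editor's illustrative aside, not part of this patch or the generated module: a minimal sketch of assembling a file-based TlsCertificate from the dataclasses above. The certificate and key paths are placeholders; DataSource is assumed to come from the envoy.config.core.v3 module referenced by this file's relative imports.

from envoy_data_plane.envoy.config.core.v3 import DataSource
from envoy_data_plane.envoy.extensions.transport_sockets.tls.v3 import TlsCertificate

# Placeholder paths; per the field comments above, file-based sources delivered
# via SDS get a watch on their parent directory to support rotation.
cert = TlsCertificate(
    certificate_chain=DataSource(filename="/etc/envoy/certs/chain.pem"),
    private_key=DataSource(filename="/etc/envoy/certs/key.pem"),
)
print(cert.to_json())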
- password: "____config_core_v3__.DataSource" = betterproto.message_field(3) - # The OCSP response to be stapled with this certificate during the handshake. - # The response must be DER-encoded and may only be provided via ``filename`` - # or ``inline_bytes``. The response may pertain to only one certificate. - ocsp_staple: "____config_core_v3__.DataSource" = betterproto.message_field(4) - # [#not-implemented-hide:] - signed_certificate_timestamp: List[ - "____config_core_v3__.DataSource" - ] = betterproto.message_field(5) - - -@dataclass(eq=False, repr=False) -class TlsSessionTicketKeys(betterproto.Message): - # Keys for encrypting and decrypting TLS session tickets. The first key in - # the array contains the key to encrypt all new sessions created by this - # context. All keys are candidates for decrypting received tickets. This - # allows for easy rotation of keys by, for example, putting the new key - # first, and the previous key second. If :ref:`session_ticket_keys ` is not specified, the TLS library will still support resuming - # sessions via tickets, but it will use an internally-generated and managed - # key, so sessions cannot be resumed across hot restarts or on different - # hosts. Each key must contain exactly 80 bytes of cryptographically-secure - # random data. For example, the output of ``openssl rand 80``. .. attention:: - # Using this feature has serious security considerations and risks. Improper - # handling of keys may result in loss of secrecy in connections, even if - # ciphers supporting perfect forward secrecy are used. See - # https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - # discussion. To minimize the risk, you must: * Keep the session ticket - # keys at least as secure as your TLS certificate private keys * Rotate - # session ticket keys at least daily, and preferably hourly * Always - # generate keys using a cryptographically-secure random data source - keys: List["____config_core_v3__.DataSource"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class CertificateProviderPluginInstance(betterproto.Message): - """ - Indicates a certificate to be obtained from a named CertificateProvider - plugin instance. The plugin instances are defined in the client's bootstrap - file. The plugin allows certificates to be fetched/refreshed over the - network asynchronously with respect to the TLS handshake. [#not- - implemented-hide:] - """ - - # Provider instance name. If not present, defaults to "default". Instance - # names should generally be defined not in terms of the underlying provider - # implementation (e.g., "file_watcher") but rather in terms of the function - # of the certificates (e.g., "foo_deployment_identity"). - instance_name: str = betterproto.string_field(1) - # Opaque name used to specify certificate instances or types. For example, - # "ROOTCA" to specify a root-certificate (validation context) or - # "example.com" to specify a certificate for a particular domain. Not all - # provider instances will actually use this field, so the value defaults to - # the empty string. - certificate_name: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class SubjectAltNameMatcher(betterproto.Message): - """ - Matcher for subject alternative names, to match both type and value of the - SAN. - """ - - # Specification of type of SAN. Note that the default enum value is an - # invalid choice. - san_type: "SubjectAltNameMatcherSanType" = betterproto.enum_field(1) - # Matcher for SAN value. 
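A hedged sketch (editor's illustration): matching one specific DNS SAN with SubjectAltNameMatcher, as described in the comments above. The hostname is an assumption, and StringMatcher is taken from the envoy.type.matcher.v3 module imported at the bottom of this file.

from envoy_data_plane.envoy.type.matcher.v3 import StringMatcher
from envoy_data_plane.envoy.extensions.transport_sockets.tls.v3 import (
    SubjectAltNameMatcher,
    SubjectAltNameMatcherSanType,
)

# Exact-match a single DNS SAN; "api.example.com" is a placeholder host.
san_matcher = SubjectAltNameMatcher(
    san_type=SubjectAltNameMatcherSanType.DNS,
    matcher=StringMatcher(exact="api.example.com"),
)
print(san_matcher.to_dict())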
- matcher: "____type_matcher_v3__.StringMatcher" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class CertificateValidationContext(betterproto.Message): - """[#next-free-field: 16]""" - - # TLS certificate data containing certificate authority certificates to use - # in verifying a presented peer certificate (e.g. server certificate for - # clusters or client certificate for listeners). If not specified and a peer - # certificate is presented it will not be verified. By default, a client - # certificate is optional, unless one of the additional options - # (:ref:`require_client_certificate `, - # :ref:`verify_certificate_spki `, - # :ref:`verify_certificate_hash `, or - # :ref:`match_typed_subject_alt_names `) is also specified. It can optionally contain certificate revocation - # lists, in which case Envoy will verify that the presented peer certificate - # has not been revoked by one of the included CRLs. Note that if a CRL is - # provided for any certificate authority in a trust chain, a CRL must be - # provided for all certificate authorities in that chain. Failure to do so - # will result in verification failure for both revoked and unrevoked - # certificates from that chain. The behavior of requiring all certificates to - # contain CRLs if any do can be altered by setting - # :ref:`only_verify_leaf_cert_crl ` true. - # If set to true, only the final certificate in the chain undergoes CRL - # verification. See :ref:`the TLS overview - # ` for a list of common system CA - # locations. If *trusted_ca* is a filesystem path, a watch will be added to - # the parent directory for any file moves to support rotation. This currently - # only applies to dynamic secrets, when the *CertificateValidationContext* is - # delivered via SDS. Only one of *trusted_ca* and - # *ca_certificate_provider_instance* may be specified. [#next-major-version: - # This field and watched_directory below should ideally be moved into a - # separate sub-message, since there's no point in specifying the latter field - # without this one.] - trusted_ca: "____config_core_v3__.DataSource" = betterproto.message_field(1) - # Certificate provider instance for fetching TLS certificates. Only one of - # *trusted_ca* and *ca_certificate_provider_instance* may be specified. - # [#not-implemented-hide:] - ca_certificate_provider_instance: "CertificateProviderPluginInstance" = ( - betterproto.message_field(13) - ) - # If specified, updates of a file-based *trusted_ca* source will be triggered - # by this watch. This allows explicit control over the path watched, by - # default the parent directory of the filesystem path in *trusted_ca* is - # watched if this field is not specified. This only applies when a - # *CertificateValidationContext* is delivered by SDS with references to - # filesystem paths. See the :ref:`SDS key rotation ` - # documentation for further details. - watched_directory: "____config_core_v3__.WatchedDirectory" = ( - betterproto.message_field(11) - ) - # An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will - # verify that the SHA-256 of the DER-encoded Subject Public Key Information - # (SPKI) of the presented certificate matches one of the specified values. A - # base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the - # certificate can be generated with the following command: .. 
code-block:: - # bash $ openssl x509 -in path/to/client.crt -noout -pubkey | openssl - # pkey -pubin -outform DER | openssl dgst -sha256 -binary | openssl - # enc -base64 NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= This is the - # format used in HTTP Public Key Pinning. When both: - # :ref:`verify_certificate_hash ` and - # :ref:`verify_certificate_spki ` are - # specified, a hash matching value from either of the lists will result in - # the certificate being accepted. .. attention:: This option is preferred - # over :ref:`verify_certificate_hash `, - # because SPKI is tied to a private key, so it doesn't change when the - # certificate is renewed using the same private key. - verify_certificate_spki: List[str] = betterproto.string_field(3) - # An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will - # verify that the SHA-256 of the DER-encoded presented certificate matches - # one of the specified values. A hex-encoded SHA-256 of the certificate can - # be generated with the following command: .. code-block:: bash $ openssl - # x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " - # -f2 df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a A - # long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the - # certificate can be generated with the following command: .. code-block:: - # bash $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | - # cut -d"=" -f2 DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83 - # :FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A Both of those formats are acceptable. - # When both: :ref:`verify_certificate_hash - # ` and :ref:`verify_certificate_spki ` are - # specified, a hash matching value from either of the lists will result in - # the certificate being accepted. - verify_certificate_hash: List[str] = betterproto.string_field(2) - # An optional list of Subject Alternative name matchers. If specified, Envoy - # will verify that the Subject Alternative Name of the presented certificate - # matches one of the specified matchers. The matching uses "any" semantics, - # that is to say, the SAN is verified if at least one matcher is matched. - # When a certificate has wildcard DNS SAN entries, to match a specific - # client, it should be configured with exact match type in the :ref:`string - # matcher `. For example if - # the certificate has "\*.example.com" as DNS SAN entry, to allow only - # "api.example.com", it should be configured as shown below. .. code-block:: - # yaml match_typed_subject_alt_names: - san_type: DNS matcher: - # exact: "api.example.com" .. attention:: Subject Alternative Names are - # easily spoofable and verifying only them is insecure, therefore this - # option must be used together with :ref:`trusted_ca ` - # . - match_typed_subject_alt_names: List[ - "SubjectAltNameMatcher" - ] = betterproto.message_field(15) - # This field is deprecated in favor of ref:`match_typed_subject_alt_names ` - match_subject_alt_names: List[ - "____type_matcher_v3__.StringMatcher" - ] = betterproto.message_field(9) - # [#not-implemented-hide:] Must present signed certificate time-stamp. - require_signed_certificate_timestamp: Optional[bool] = betterproto.message_field( - 6, wraps=betterproto.TYPE_BOOL - ) - # An optional `certificate revocation list - # `_ (in PEM - # format). If specified, Envoy will verify that the presented peer - # certificate has not been revoked by this CRL. If this DataSource contains - # multiple CRLs, all of them will be used. 
Note that if a CRL is provided for - # any certificate authority in a trust chain, a CRL must be provided for all - # certificate authorities in that chain. Failure to do so will result in - # verification failure for both revoked and unrevoked certificates from that - # chain. This default behavior can be altered by setting - # :ref:`only_verify_leaf_cert_crl ` to - # true. - crl: "____config_core_v3__.DataSource" = betterproto.message_field(7) - # If specified, Envoy will not reject expired certificates. - allow_expired_certificate: bool = betterproto.bool_field(8) - # Certificate trust chain verification mode. - trust_chain_verification: "CertificateValidationContextTrustChainVerification" = ( - betterproto.enum_field(10) - ) - # The configuration of an extension specific certificate validator. If - # specified, all validation is done by the specified validator, and the - # behavior of all other validation settings is defined by the specified - # validator (and may be entirely ignored, unused, and unvalidated). Refer to - # the documentation for the specified validator. If you do not want a custom - # validation algorithm, do not set this field. [#extension-category: - # envoy.tls.cert_validator] - custom_validator_config: "____config_core_v3__.TypedExtensionConfig" = ( - betterproto.message_field(12) - ) - # If this option is set to true, only the certificate at the end of the - # certificate chain will be subject to validation by :ref:`CRL - # `. - only_verify_leaf_cert_crl: bool = betterproto.bool_field(14) - - def __post_init__(self) -> None: - super().__post_init__() - if self.match_subject_alt_names: - warnings.warn( - "CertificateValidationContext.match_subject_alt_names is deprecated", - DeprecationWarning, - ) - - -@dataclass(eq=False, repr=False) -class GenericSecret(betterproto.Message): - # Secret of generic type and is available to filters. - secret: "____config_core_v3__.DataSource" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class SdsSecretConfig(betterproto.Message): - # Name by which the secret can be uniquely referred to. When both name and - # config are specified, then secret can be fetched and/or reloaded via SDS. - # When only name is specified, then secret will be loaded from static - # resources. - name: str = betterproto.string_field(1) - sds_config: "____config_core_v3__.ConfigSource" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Secret(betterproto.Message): - """[#next-free-field: 6]""" - - # Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely - # referred to. - name: str = betterproto.string_field(1) - tls_certificate: "TlsCertificate" = betterproto.message_field(2, group="type") - session_ticket_keys: "TlsSessionTicketKeys" = betterproto.message_field( - 3, group="type" - ) - validation_context: "CertificateValidationContext" = betterproto.message_field( - 4, group="type" - ) - generic_secret: "GenericSecret" = betterproto.message_field(5, group="type") - - -@dataclass(eq=False, repr=False) -class UpstreamTlsContext(betterproto.Message): - # Common TLS context settings. .. attention:: Server certificate - # verification is not enabled by default. Configure :ref:`trusted_ca` to enable verification. - common_tls_context: "CommonTlsContext" = betterproto.message_field(1) - # SNI string to use when creating TLS backend connections. - sni: str = betterproto.string_field(2) - # If true, server-initiated TLS renegotiation will be allowed. .. 
attention:: - # TLS renegotiation is considered insecure and shouldn't be used unless - # absolutely necessary. - allow_renegotiation: bool = betterproto.bool_field(3) - # Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs - # and Session Tickets for TLSv1.2 and older) to store for the purpose of - # session resumption. Defaults to 1, setting this to 0 disables session - # resumption. - max_session_keys: Optional[int] = betterproto.message_field( - 4, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class DownstreamTlsContext(betterproto.Message): - """[#next-free-field: 9]""" - - # Common TLS context settings. - common_tls_context: "CommonTlsContext" = betterproto.message_field(1) - # If specified, Envoy will reject connections without a valid client - # certificate. - require_client_certificate: Optional[bool] = betterproto.message_field( - 2, wraps=betterproto.TYPE_BOOL - ) - # If specified, Envoy will reject connections without a valid and matching - # SNI. [#not-implemented-hide:] - require_sni: Optional[bool] = betterproto.message_field( - 3, wraps=betterproto.TYPE_BOOL - ) - # TLS session ticket key settings. - session_ticket_keys: "TlsSessionTicketKeys" = betterproto.message_field( - 4, group="session_ticket_keys_type" - ) - # Config for fetching TLS session ticket keys via SDS API. - session_ticket_keys_sds_secret_config: "SdsSecretConfig" = ( - betterproto.message_field(5, group="session_ticket_keys_type") - ) - # Config for controlling stateless TLS session resumption: setting this to - # true will cause the TLS server to not issue TLS session tickets for the - # purposes of stateless TLS session resumption. If set to false, the TLS - # server will issue TLS session tickets and encrypt/decrypt them using the - # keys specified through either :ref:`session_ticket_keys ` or :ref:`session_ticket_keys_sds_secret_config `. If this config is set to false and no keys are - # explicitly configured, the TLS server will issue TLS session tickets and - # encrypt/decrypt them using an internally-generated and managed key, with - # the implication that sessions cannot be resumed across hot restarts or on - # different hosts. - disable_stateless_session_resumption: bool = betterproto.bool_field( - 7, group="session_ticket_keys_type" - ) - # If specified, ``session_timeout`` will change the maximum lifetime (in - # seconds) of the TLS session. Currently this value is used as a hint for the - # `TLS session ticket lifetime (for TLSv1.2) - # `_. Only seconds can be - # specified (fractional seconds are ignored). - session_timeout: timedelta = betterproto.message_field(6) - # Config for whether to use certificates if they do not have an accompanying - # OCSP response or if the response expires at runtime. Defaults to - # LENIENT_STAPLING - ocsp_staple_policy: "DownstreamTlsContextOcspStaplePolicy" = betterproto.enum_field( - 8 - ) - - -@dataclass(eq=False, repr=False) -class CommonTlsContext(betterproto.Message): - """ - TLS context shared by both client and server TLS contexts. [#next-free- - field: 15] - """ - - # TLS protocol versions, cipher suites etc. - tls_params: "TlsParameters" = betterproto.message_field(1) - # :ref:`Multiple TLS certificates ` can be - # associated with the same context to allow both RSA and ECDSA certificates. - # Only a single TLS certificate is supported in client contexts. 
In server - # contexts, the first RSA certificate is used for clients that only support - # RSA and the first ECDSA certificate is used for clients that support ECDSA. - # Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, and - # *tls_certificate_provider_instance* may be used. [#next-major-version: - # These mutually exclusive fields should ideally be in a oneof, but it's not - # legal to put a repeated field in a oneof. In the next major version, we - # should rework this to avoid this problem.] - tls_certificates: List["TlsCertificate"] = betterproto.message_field(2) - # Configs for fetching TLS certificates via SDS API. Note SDS API allows - # certificates to be fetched/refreshed over the network asynchronously with - # respect to the TLS handshake. The same number and types of certificates as - # :ref:`tls_certificates ` are valid in the the certificates - # fetched through this setting. Only one of *tls_certificates*, - # *tls_certificate_sds_secret_configs*, and - # *tls_certificate_provider_instance* may be used. [#next-major-version: - # These mutually exclusive fields should ideally be in a oneof, but it's not - # legal to put a repeated field in a oneof. In the next major version, we - # should rework this to avoid this problem.] - tls_certificate_sds_secret_configs: List[ - "SdsSecretConfig" - ] = betterproto.message_field(6) - # Certificate provider instance for fetching TLS certs. Only one of - # *tls_certificates*, *tls_certificate_sds_secret_configs*, and - # *tls_certificate_provider_instance* may be used. [#not-implemented-hide:] - tls_certificate_provider_instance: "CertificateProviderPluginInstance" = ( - betterproto.message_field(14) - ) - # Certificate provider for fetching TLS certificates. [#not-implemented- - # hide:] - tls_certificate_certificate_provider: "CommonTlsContextCertificateProvider" = ( - betterproto.message_field(9) - ) - # Certificate provider instance for fetching TLS certificates. [#not- - # implemented-hide:] - tls_certificate_certificate_provider_instance: "CommonTlsContextCertificateProviderInstance" = betterproto.message_field( - 11 - ) - # How to validate peer certificates. - validation_context: "CertificateValidationContext" = betterproto.message_field( - 3, group="validation_context_type" - ) - # Config for fetching validation context via SDS API. Note SDS API allows - # certificates to be fetched/refreshed over the network asynchronously with - # respect to the TLS handshake. - validation_context_sds_secret_config: "SdsSecretConfig" = betterproto.message_field( - 7, group="validation_context_type" - ) - # Combined certificate validation context holds a default - # CertificateValidationContext and SDS config. When SDS server returns - # dynamic CertificateValidationContext, both dynamic and default - # CertificateValidationContext are merged into a new - # CertificateValidationContext for validation. This merge is done by - # Message::MergeFrom(), so dynamic CertificateValidationContext overwrites - # singular fields in default CertificateValidationContext, and concatenates - # repeated fields to default CertificateValidationContext, and logical OR is - # applied to boolean fields. - combined_validation_context: "CommonTlsContextCombinedCertificateValidationContext" = betterproto.message_field( - 8, group="validation_context_type" - ) - # Certificate provider for fetching validation context. 
[#not-implemented- - # hide:] - validation_context_certificate_provider: "CommonTlsContextCertificateProvider" = ( - betterproto.message_field(10, group="validation_context_type") - ) - # Certificate provider instance for fetching validation context. [#not- - # implemented-hide:] - validation_context_certificate_provider_instance: "CommonTlsContextCertificateProviderInstance" = betterproto.message_field( - 12, group="validation_context_type" - ) - # Supplies the list of ALPN protocols that the listener should expose. In - # practice this is likely to be set to one of two values (see the - # :ref:`codec_type ` parameter in the HTTP - # connection manager for more information): * "h2,http/1.1" If the listener - # is going to support both HTTP/2 and HTTP/1.1. * "http/1.1" If the listener - # is only going to support HTTP/1.1. There is no default for this parameter. - # If empty, Envoy will not expose ALPN. - alpn_protocols: List[str] = betterproto.string_field(4) - # Custom TLS handshaker. If empty, defaults to native TLS handshaking - # behavior. - custom_handshaker: "____config_core_v3__.TypedExtensionConfig" = ( - betterproto.message_field(13) - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.tls_certificate_certificate_provider: - warnings.warn( - "CommonTlsContext.tls_certificate_certificate_provider is deprecated", - DeprecationWarning, - ) - if self.tls_certificate_certificate_provider_instance: - warnings.warn( - "CommonTlsContext.tls_certificate_certificate_provider_instance is deprecated", - DeprecationWarning, - ) - if self.validation_context_certificate_provider: - warnings.warn( - "CommonTlsContext.validation_context_certificate_provider is deprecated", - DeprecationWarning, - ) - if self.validation_context_certificate_provider_instance: - warnings.warn( - "CommonTlsContext.validation_context_certificate_provider_instance is deprecated", - DeprecationWarning, - ) - - -@dataclass(eq=False, repr=False) -class CommonTlsContextCertificateProvider(betterproto.Message): - """ - Config for Certificate provider to get certificates. This provider should - allow certificates to be fetched/refreshed over the network asynchronously - with respect to the TLS handshake. DEPRECATED: This message is not - currently used, but if we ever do need it, we will want to move it out of - CommonTlsContext and into common.proto, similar to the existing - CertificateProviderPluginInstance message. [#not-implemented-hide:] - """ - - # opaque name used to specify certificate instances or types. For example, - # "ROOTCA" to specify a root-certificate (validation context) or "TLS" to - # specify a new tls-certificate. - name: str = betterproto.string_field(1) - typed_config: "____config_core_v3__.TypedExtensionConfig" = ( - betterproto.message_field(2, group="config") - ) - - -@dataclass(eq=False, repr=False) -class CommonTlsContextCertificateProviderInstance(betterproto.Message): - """ - Similar to CertificateProvider above, but allows the provider instances to - be configured on the client side instead of being sent from the control - plane. DEPRECATED: This message was moved outside of CommonTlsContext and - now lives in common.proto. [#not-implemented-hide:] - """ - - # Provider instance name. This name must be defined in the client's - # configuration (e.g., a bootstrap file) to correspond to a provider instance - # (i.e., the same data in the typed_config field that would be sent in the - # CertificateProvider message if the config was sent by the control plane). 
- # If not present, defaults to "default". Instance names should generally be - # defined not in terms of the underlying provider implementation (e.g., - # "file_watcher") but rather in terms of the function of the certificates - # (e.g., "foo_deployment_identity"). - instance_name: str = betterproto.string_field(1) - # Opaque name used to specify certificate instances or types. For example, - # "ROOTCA" to specify a root-certificate (validation context) or - # "example.com" to specify a certificate for a particular domain. Not all - # provider instances will actually use this field, so the value defaults to - # the empty string. - certificate_name: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class CommonTlsContextCombinedCertificateValidationContext(betterproto.Message): - # How to validate peer certificates. - default_validation_context: "CertificateValidationContext" = ( - betterproto.message_field(1) - ) - # Config for fetching validation context via SDS API. Note SDS API allows - # certificates to be fetched/refreshed over the network asynchronously with - # respect to the TLS handshake. - validation_context_sds_secret_config: "SdsSecretConfig" = betterproto.message_field( - 2 - ) - # Certificate provider for fetching CA certs. This will populate the - # *default_validation_context.trusted_ca* field. [#not-implemented-hide:] - validation_context_certificate_provider: "CommonTlsContextCertificateProvider" = ( - betterproto.message_field(3) - ) - # Certificate provider instance for fetching CA certs. This will populate the - # *default_validation_context.trusted_ca* field. [#not-implemented-hide:] - validation_context_certificate_provider_instance: "CommonTlsContextCertificateProviderInstance" = betterproto.message_field( - 4 - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.validation_context_certificate_provider: - warnings.warn( - "CommonTlsContextCombinedCertificateValidationContext.validation_context_certificate_provider is deprecated", - DeprecationWarning, - ) - if self.validation_context_certificate_provider_instance: - warnings.warn( - "CommonTlsContextCombinedCertificateValidationContext.validation_context_certificate_provider_instance is deprecated", - DeprecationWarning, - ) - - -@dataclass(eq=False, repr=False) -class SpiffeCertValidatorConfig(betterproto.Message): - """ - Configuration specific to the `SPIFFE `_ - certificate validator. Example: .. validated-code-block:: yaml :type- - name: - envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext - custom_validator_config: name: envoy.tls.cert_validator.spiffe - typed_config: "@type": type.googleapis.com/envoy.extensions.transport - _sockets.tls.v3.SPIFFECertValidatorConfig trust_domains: - - name: foo.com trust_bundle: filename: "foo.pem" - - name: envoy.com trust_bundle: filename: "envoy.pem" In - this example, a presented peer certificate whose SAN matches - `spiffe//foo.com/**` is validated against the "foo.pem" x.509 certificate. - All the trust bundles are isolated from each other, so no trust domain can - mint a SVID belonging to another trust domain. That means, in this example, - a SVID signed by `envoy.com`'s CA with `spiffe//foo.com/**` SAN would be - rejected since Envoy selects the trust bundle according to the presented - SAN before validate the certificate. Note that SPIFFE validator inherits - and uses the following options from :ref:`CertificateValidationContext `. - :ref:`allow_expired_certificate ` to allow expired certificates. 
- :ref:`match_typed_subject_alt_names ` to match **URI** SAN of - certificates. Unlike the default validator, SPIFFE validator only matches - **URI** SAN (which equals to SVID in SPIFFE terminology) and ignore other - SAN types. - """ - - # This field specifies trust domains used for validating incoming - # X.509-SVID(s). - trust_domains: List[ - "SpiffeCertValidatorConfigTrustDomain" - ] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class SpiffeCertValidatorConfigTrustDomain(betterproto.Message): - # Name of the trust domain, `example.com`, `foo.bar.gov` for example. Note - # that this must *not* have "spiffe://" prefix. - name: str = betterproto.string_field(1) - # Specify a data source holding x.509 trust bundle used for validating - # incoming SVID(s) in this trust domain. - trust_bundle: "____config_core_v3__.DataSource" = betterproto.message_field(2) - - -from .....config.core import v3 as ____config_core_v3__ -from .....type.matcher import v3 as ____type_matcher_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/extensions/upstreams/__init__.py b/src/envoy_data_plane/envoy/extensions/upstreams/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/upstreams/http/__init__.py b/src/envoy_data_plane/envoy/extensions/upstreams/http/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/upstreams/http/generic/__init__.py b/src/envoy_data_plane/envoy/extensions/upstreams/http/generic/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/upstreams/http/generic/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/upstreams/http/generic/v3/__init__.py deleted file mode 100644 index e942bb9..0000000 --- a/src/envoy_data_plane/envoy/extensions/upstreams/http/generic/v3/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class GenericConnectionPoolProto(betterproto.Message): - """ - A connection pool which forwards downstream HTTP as TCP or HTTP to - upstream, based on CONNECT configuration. [#extension: - envoy.upstreams.http.generic] - """ - - pass diff --git a/src/envoy_data_plane/envoy/extensions/upstreams/http/http/__init__.py b/src/envoy_data_plane/envoy/extensions/upstreams/http/http/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/upstreams/http/http/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/upstreams/http/http/v3/__init__.py deleted file mode 100644 index 238bf5e..0000000 --- a/src/envoy_data_plane/envoy/extensions/upstreams/http/http/v3/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class HttpConnectionPoolProto(betterproto.Message): - """ - A connection pool which forwards downstream HTTP as HTTP to upstream. 
- [#extension: envoy.upstreams.http.http] - """ - - pass diff --git a/src/envoy_data_plane/envoy/extensions/upstreams/http/tcp/__init__.py b/src/envoy_data_plane/envoy/extensions/upstreams/http/tcp/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/upstreams/http/tcp/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/upstreams/http/tcp/v3/__init__.py deleted file mode 100644 index 5edb4e8..0000000 --- a/src/envoy_data_plane/envoy/extensions/upstreams/http/tcp/v3/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class TcpConnectionPoolProto(betterproto.Message): - """ - A connection pool which forwards downstream HTTP as TCP to upstream, - [#extension: envoy.upstreams.http.tcp] - """ - - pass diff --git a/src/envoy_data_plane/envoy/extensions/upstreams/http/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/upstreams/http/v3/__init__.py deleted file mode 100644 index 80d7451..0000000 --- a/src/envoy_data_plane/envoy/extensions/upstreams/http/v3/__init__.py +++ /dev/null @@ -1,146 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/upstreams/http/v3/http_protocol_options.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class HttpProtocolOptions(betterproto.Message): - """ - HttpProtocolOptions specifies Http upstream protocol options. This object - is used in :ref:`typed_extension_protocol_options`, keyed by the name - `envoy.extensions.upstreams.http.v3.HttpProtocolOptions`. This controls - what protocol(s) should be used for upstream and how said protocol(s) are - configured. This replaces the prior pattern of explicit protocol - configuration directly in the cluster. So a configuration like this, - explicitly configuring the use of HTTP/2 upstream: .. code:: clusters: - - name: some_service connect_timeout: 5s - upstream_http_protocol_options: auto_sni: true - common_http_protocol_options: idle_timeout: 1s - http2_protocol_options: max_concurrent_streams: 100 .... - [further cluster config] Would now look like this: .. code:: clusters: - - name: some_service connect_timeout: 5s - typed_extension_protocol_options: - envoy.extensions.upstreams.http.v3.HttpProtocolOptions: "@type": - type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions - upstream_http_protocol_options: auto_sni: true - common_http_protocol_options: idle_timeout: 1s - explicit_http_config: http2_protocol_options: - max_concurrent_streams: 100 .... [further cluster config] [#next- - free-field: 6] - """ - - # This contains options common across HTTP/1 and HTTP/2 - common_http_protocol_options: "____config_core_v3__.HttpProtocolOptions" = ( - betterproto.message_field(1) - ) - # This contains common protocol options which are only applied upstream. - upstream_http_protocol_options: "____config_core_v3__.UpstreamHttpProtocolOptions" = betterproto.message_field( - 2 - ) - # To explicitly configure either HTTP/1 or HTTP/2 (but not both!) use - # *explicit_http_config*. If the *explicit_http_config* is empty, HTTP/1.1 is - # used. 
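An illustrative sketch, not part of the patch: the HTTP/2-only shape from the docstring's YAML example expressed with these dataclasses (the surrounding cluster configuration is omitted). Http2ProtocolOptions is assumed from envoy.config.core.v3 via this file's relative import.

from envoy_data_plane.envoy.config.core.v3 import Http2ProtocolOptions
from envoy_data_plane.envoy.extensions.upstreams.http.v3 import (
    HttpProtocolOptions,
    HttpProtocolOptionsExplicitHttpConfig,
)

# Pin the upstream protocol to HTTP/2, mirroring the docstring's example.
options = HttpProtocolOptions(
    explicit_http_config=HttpProtocolOptionsExplicitHttpConfig(
        http2_protocol_options=Http2ProtocolOptions(max_concurrent_streams=100),
    ),
)
print(options.to_json())

As the docstring notes, this object would be installed under typed_extension_protocol_options keyed by the name envoy.extensions.upstreams.http.v3.HttpProtocolOptions.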
- explicit_http_config: "HttpProtocolOptionsExplicitHttpConfig" = ( - betterproto.message_field(3, group="upstream_protocol_options") - ) - # This allows switching on protocol based on what protocol the downstream - # connection used. - use_downstream_protocol_config: "HttpProtocolOptionsUseDownstreamHttpConfig" = ( - betterproto.message_field(4, group="upstream_protocol_options") - ) - # This allows switching on protocol based on ALPN - auto_config: "HttpProtocolOptionsAutoHttpConfig" = betterproto.message_field( - 5, group="upstream_protocol_options" - ) - - -@dataclass(eq=False, repr=False) -class HttpProtocolOptionsExplicitHttpConfig(betterproto.Message): - """ - If this is used, the cluster will only operate on one of the possible - upstream protocols. Note that HTTP/2 or above should generally be used for - upstream gRPC clusters. - """ - - http_protocol_options: "____config_core_v3__.Http1ProtocolOptions" = ( - betterproto.message_field(1, group="protocol_config") - ) - http2_protocol_options: "____config_core_v3__.Http2ProtocolOptions" = ( - betterproto.message_field(2, group="protocol_config") - ) - # .. warning:: QUIC support is currently alpha and should be used with - # caution. Please see :ref:`here ` for details. - http3_protocol_options: "____config_core_v3__.Http3ProtocolOptions" = ( - betterproto.message_field(3, group="protocol_config") - ) - - -@dataclass(eq=False, repr=False) -class HttpProtocolOptionsUseDownstreamHttpConfig(betterproto.Message): - """ - If this is used, the cluster can use either of the configured protocols, - and will use whichever protocol was used by the downstream connection. If - HTTP/3 is configured for downstream and not configured for upstream, HTTP/3 - requests will fail over to HTTP/2. - """ - - http_protocol_options: "____config_core_v3__.Http1ProtocolOptions" = ( - betterproto.message_field(1) - ) - http2_protocol_options: "____config_core_v3__.Http2ProtocolOptions" = ( - betterproto.message_field(2) - ) - # .. warning:: QUIC support is currently alpha and should be used with - # caution. Please see :ref:`here ` for details. - http3_protocol_options: "____config_core_v3__.Http3ProtocolOptions" = ( - betterproto.message_field(3) - ) - - -@dataclass(eq=False, repr=False) -class HttpProtocolOptionsAutoHttpConfig(betterproto.Message): - """ - If this is used, the cluster can use either HTTP/1 or HTTP/2, and will use - whichever protocol is negotiated by ALPN with the upstream. Clusters - configured with *AutoHttpConfig* will use the highest available protocol; - HTTP/2 if supported, otherwise HTTP/1. If the upstream does not support - ALPN, *AutoHttpConfig* will fail over to HTTP/1. This can only be used with - transport sockets which support ALPN. Using a transport socket which does - not support ALPN will result in configuration failure. The transport layer - may be configured with custom ALPN, but the default ALPN for the cluster - (or if custom ALPN fails) will be "h2,http/1.1". - """ - - http_protocol_options: "____config_core_v3__.Http1ProtocolOptions" = ( - betterproto.message_field(1) - ) - http2_protocol_options: "____config_core_v3__.Http2ProtocolOptions" = ( - betterproto.message_field(2) - ) - # Unlike HTTP/1 and HTTP/2, HTTP/3 will not be configured unless it is - # present, and (soon) only if there is an indication of server side support. - # See :ref:`here ` for more information on when - # HTTP/3 will be used, and when Envoy will fail over to TCP. .. warning:: - # QUIC support is currently alpha and should be used with caution. 
Please - # see :ref:`here ` for details. AutoHttpConfig config - # is undergoing especially rapid change and as it is alpha is not - # guaranteed to be API-stable. - http3_protocol_options: "____config_core_v3__.Http3ProtocolOptions" = ( - betterproto.message_field(3) - ) - # The presence of alternate protocols cache options causes the use of the - # alternate protocols cache, which is responsible for parsing and caching - # HTTP Alt-Svc headers. This enables the use of HTTP/3 for origins that - # advertise supporting it. .. note:: This is required when HTTP/3 is - # enabled. - alternate_protocols_cache_options: "____config_core_v3__.AlternateProtocolsCacheOptions" = betterproto.message_field( - 4 - ) - - -from .....config.core import v3 as ____config_core_v3__ diff --git a/src/envoy_data_plane/envoy/extensions/upstreams/tcp/__init__.py b/src/envoy_data_plane/envoy/extensions/upstreams/tcp/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/upstreams/tcp/generic/__init__.py b/src/envoy_data_plane/envoy/extensions/upstreams/tcp/generic/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/upstreams/tcp/generic/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/upstreams/tcp/generic/v3/__init__.py deleted file mode 100644 index efe8aec..0000000 --- a/src/envoy_data_plane/envoy/extensions/upstreams/tcp/generic/v3/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/upstreams/tcp/generic/v3/generic_connection_pool.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class GenericConnectionPoolProto(betterproto.Message): - """ - A connection pool which forwards downstream TCP as TCP or HTTP to upstream, - based on CONNECT configuration. [#extension: envoy.upstreams.tcp.generic] - """ - - pass diff --git a/src/envoy_data_plane/envoy/extensions/wasm/__init__.py b/src/envoy_data_plane/envoy/extensions/wasm/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/extensions/wasm/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/wasm/v3/__init__.py deleted file mode 100644 index ee02120..0000000 --- a/src/envoy_data_plane/envoy/extensions/wasm/v3/__init__.py +++ /dev/null @@ -1,160 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/extensions/wasm/v3/wasm.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Dict, List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class CapabilityRestrictionConfig(betterproto.Message): - """ - Configuration for restricting Proxy-Wasm capabilities available to modules. - """ - - # The Proxy-Wasm capabilities which will be allowed. Capabilities are mapped - # by name. The *SanitizationConfig* which each capability maps to is - # currently unimplemented and ignored, and so should be left empty. The - # capability names are given in the `Proxy-Wasm ABI - # `_. - # Additionally, the following WASI capabilities from `this list `_ are - # implemented and can be allowed: *fd_write*, *fd_read*, *fd_seek*, - # *fd_close*, *fd_fdstat_get*, *environ_get*, *environ_sizes_get*, - # *args_get*, *args_sizes_get*, *proc_exit*, *clock_time_get*, *random_get*. 
- allowed_capabilities: Dict[str, "SanitizationConfig"] = betterproto.map_field( - 1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - - -@dataclass(eq=False, repr=False) -class SanitizationConfig(betterproto.Message): - """ - Configuration for sanitization of inputs to an allowed capability. NOTE: - This is currently unimplemented. - """ - - pass - - -@dataclass(eq=False, repr=False) -class VmConfig(betterproto.Message): - """Configuration for a Wasm VM. [#next-free-field: 8]""" - - # An ID which will be used along with a hash of the wasm code (or the name of - # the registered Null VM plugin) to determine which VM will be used for the - # plugin. All plugins which use the same *vm_id* and code will use the same - # VM. May be left blank. Sharing a VM between plugins can reduce memory - # utilization and make sharing of data easier which may have security - # implications. [#comment: TODO: add ref for details.] - vm_id: str = betterproto.string_field(1) - # The Wasm runtime type. Available Wasm runtime types are registered as - # extensions. The following runtimes are included in Envoy code base: .. - # _extension_envoy.wasm.runtime.null: **envoy.wasm.runtime.null**: Null - # sandbox, the Wasm module must be compiled and linked into the Envoy binary. - # The registered name is given in the *code* field as *inline_string*. .. - # _extension_envoy.wasm.runtime.v8: **envoy.wasm.runtime.v8**: `V8 - # `_-based WebAssembly runtime. .. - # _extension_envoy.wasm.runtime.wamr: **envoy.wasm.runtime.wamr**: `WAMR - # `_-based - # WebAssembly runtime. This runtime is not enabled in the official build. .. - # _extension_envoy.wasm.runtime.wavm: **envoy.wasm.runtime.wavm**: `WAVM - # `_-based WebAssembly runtime. This runtime is not - # enabled in the official build. .. _extension_envoy.wasm.runtime.wasmtime: - # **envoy.wasm.runtime.wasmtime**: `Wasmtime `_-based - # WebAssembly runtime. This runtime is not enabled in the official build. - # [#extension-category: envoy.wasm.runtime] - runtime: str = betterproto.string_field(2) - # The Wasm code that Envoy will execute. - code: "___config_core_v3__.AsyncDataSource" = betterproto.message_field(3) - # The Wasm configuration used in initialization of a new VM (proxy_on_start). - # `google.protobuf.Struct` is serialized as JSON before passing it to the - # plugin. `google.protobuf.BytesValue` and `google.protobuf.StringValue` are - # passed directly without the wrapper. - configuration: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(4) - # Allow the wasm file to include pre-compiled code on VMs which support it. - # Warning: this should only be enable for trusted sources as the precompiled - # code is not verified. - allow_precompiled: bool = betterproto.bool_field(5) - # If true and the code needs to be remotely fetched and it is not in the - # cache then NACK the configuration update and do a background fetch to fill - # the cache, otherwise fetch the code asynchronously and enter warming state. - nack_on_code_cache_miss: bool = betterproto.bool_field(6) - # Specifies environment variables to be injected to this VM which will be - # available through WASI's ``environ_get`` and ``environ_get_sizes`` system - # calls. Note that these functions are mostly implicitly called in your - # language's standard library, so you do not need to call them directly and - # you can access to env vars just like when you do on native platforms. - # Warning: Envoy rejects the configuration if there's conflict of key space. 
- environment_variables: "EnvironmentVariables" = betterproto.message_field(7) - - -@dataclass(eq=False, repr=False) -class EnvironmentVariables(betterproto.Message): - # The keys of *Envoy's* environment variables exposed to this VM. In other - # words, if a key exists in Envoy's environment variables, then that key- - # value pair will be injected. Note that if a key does not exist, it will be - # ignored. - host_env_keys: List[str] = betterproto.string_field(1) - # Explicitly given key-value pairs to be injected to this VM in the form of - # "KEY=VALUE". - key_values: Dict[str, str] = betterproto.map_field( - 2, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - - -@dataclass(eq=False, repr=False) -class PluginConfig(betterproto.Message): - """ - Base Configuration for Wasm Plugins e.g. filters and services. [#next-free- - field: 7] - """ - - # A unique name for a filters/services in a VM for use in identifying the - # filter/service if multiple filters/services are handled by the same *vm_id* - # and *root_id* and for logging/debugging. - name: str = betterproto.string_field(1) - # A unique ID for a set of filters/services in a VM which will share a - # RootContext and Contexts if applicable (e.g. an Wasm HttpFilter and an Wasm - # AccessLog). If left blank, all filters/services with a blank root_id with - # the same *vm_id* will share Context(s). - root_id: str = betterproto.string_field(2) - vm_config: "VmConfig" = betterproto.message_field(3, group="vm") - # Filter/service configuration used to configure or reconfigure a plugin - # (proxy_on_configuration). `google.protobuf.Struct` is serialized as JSON - # before passing it to the plugin. `google.protobuf.BytesValue` and - # `google.protobuf.StringValue` are passed directly without the wrapper. - configuration: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(4) - # If there is a fatal error on the VM (e.g. exception, abort(), on_start or - # on_configure return false), then all plugins associated with the VM will - # either fail closed (by default), e.g. by returning an HTTP 503 error, or - # fail open (if 'fail_open' is set to true) by bypassing the filter. Note: - # when on_start or on_configure return false during xDS updates the xDS - # configuration will be rejected and when on_start or on_configuration return - # false on initial startup the proxy will not start. - fail_open: bool = betterproto.bool_field(5) - # Configuration for restricting Proxy-Wasm capabilities available to modules. - capability_restriction_config: "CapabilityRestrictionConfig" = ( - betterproto.message_field(6) - ) - - -@dataclass(eq=False, repr=False) -class WasmService(betterproto.Message): - """ - WasmService is configured as a built-in *envoy.wasm_service* - :ref:`WasmService ` This opaque configuration will be - used to create a Wasm Service. - """ - - # General plugin configuration. - config: "PluginConfig" = betterproto.message_field(1) - # If true, create a single VM rather than creating one VM per worker. Such a - # singleton can not be used with filters. 
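A hedged sketch (editor's illustration; names and paths are placeholders): composing a PluginConfig with an inline VmConfig that selects the V8 runtime listed above. AsyncDataSource.local wrapping a file-based DataSource is an assumption about the envoy.config.core.v3 types referenced by this module's imports.

from envoy_data_plane.envoy.config.core.v3 import AsyncDataSource, DataSource
from envoy_data_plane.envoy.extensions.wasm.v3 import PluginConfig, VmConfig

# Placeholder filter name, root_id and .wasm path; the runtime string is one of
# the registered runtimes documented in the VmConfig comments above.
plugin = PluginConfig(
    name="my_wasm_filter",
    root_id="my_root_id",
    vm_config=VmConfig(
        vm_id="my_vm",
        runtime="envoy.wasm.runtime.v8",
        code=AsyncDataSource(local=DataSource(filename="/etc/envoy/filter.wasm")),
    ),
    fail_open=False,
)
print(plugin.to_dict())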
-    singleton: bool = betterproto.bool_field(2)
-
-
-from ....config.core import v3 as ___config_core_v3__
-import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf
diff --git a/src/envoy_data_plane/envoy/extensions/watchdog/__init__.py b/src/envoy_data_plane/envoy/extensions/watchdog/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/envoy_data_plane/envoy/extensions/watchdog/profile_action/__init__.py b/src/envoy_data_plane/envoy/extensions/watchdog/profile_action/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/envoy_data_plane/envoy/extensions/watchdog/profile_action/v3/__init__.py b/src/envoy_data_plane/envoy/extensions/watchdog/profile_action/v3/__init__.py
deleted file mode 100644
index 616c5b4..0000000
--- a/src/envoy_data_plane/envoy/extensions/watchdog/profile_action/v3/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# sources: envoy/extensions/watchdog/profile_action/v3/profile_action.proto
-# plugin: python-betterproto
-from dataclasses import dataclass
-from datetime import timedelta
-
-import betterproto
-from betterproto.grpc.grpclib_server import ServiceBase
-
-
-@dataclass(eq=False, repr=False)
-class ProfileActionConfig(betterproto.Message):
-    """Configuration for the profile watchdog action."""
-
-    # How long the profile should last. If not set defaults to 5 seconds.
-    profile_duration: timedelta = betterproto.message_field(1)
-    # File path to the directory to output profiles.
-    profile_path: str = betterproto.string_field(2)
-    # Limits the max number of profiles that can be generated by this action over
-    # its lifetime to avoid filling the disk. If not set (i.e. it's 0), a default
-    # of 10 will be used.
-    max_profiles: int = betterproto.uint64_field(3)
diff --git a/src/envoy_data_plane/envoy/service/__init__.py b/src/envoy_data_plane/envoy/service/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/envoy_data_plane/envoy/service/accesslog/__init__.py b/src/envoy_data_plane/envoy/service/accesslog/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/envoy_data_plane/envoy/service/accesslog/v2/__init__.py b/src/envoy_data_plane/envoy/service/accesslog/v2/__init__.py
deleted file mode 100644
index 18957be..0000000
--- a/src/envoy_data_plane/envoy/service/accesslog/v2/__init__.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# sources: envoy/service/accesslog/v2/als.proto
-# plugin: python-betterproto
-from dataclasses import dataclass
-from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Union
-
-import betterproto
-from betterproto.grpc.grpclib_server import ServiceBase
-import grpclib
-
-
-@dataclass(eq=False, repr=False)
-class StreamAccessLogsResponse(betterproto.Message):
-    """
-    Empty response for the StreamAccessLogs API. Will never be sent. See below.
-    """
-
-    pass
-
-
-@dataclass(eq=False, repr=False)
-class StreamAccessLogsMessage(betterproto.Message):
-    """
-    Stream message for the StreamAccessLogs API. Envoy will open a stream to
-    the server and stream access logs without ever expecting a response.
-    """
-
-    # Identifier data that will only be sent in the first message on the stream.
-    # This is effectively structured metadata and is a performance optimization.
- identifier: "StreamAccessLogsMessageIdentifier" = betterproto.message_field(1) - http_logs: "StreamAccessLogsMessageHttpAccessLogEntries" = ( - betterproto.message_field(2, group="log_entries") - ) - tcp_logs: "StreamAccessLogsMessageTcpAccessLogEntries" = betterproto.message_field( - 3, group="log_entries" - ) - - -@dataclass(eq=False, repr=False) -class StreamAccessLogsMessageIdentifier(betterproto.Message): - # The node sending the access log messages over the stream. - node: "___api_v2_core__.Node" = betterproto.message_field(1) - # The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig - # `. - log_name: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class StreamAccessLogsMessageHttpAccessLogEntries(betterproto.Message): - """Wrapper for batches of HTTP access log entries.""" - - log_entry: List[ - "___data_accesslog_v2__.HttpAccessLogEntry" - ] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class StreamAccessLogsMessageTcpAccessLogEntries(betterproto.Message): - """Wrapper for batches of TCP access log entries.""" - - log_entry: List[ - "___data_accesslog_v2__.TcpAccessLogEntry" - ] = betterproto.message_field(1) - - -class AccessLogServiceStub(betterproto.ServiceStub): - async def stream_access_logs( - self, - request_iterator: Union[ - AsyncIterable["StreamAccessLogsMessage"], - Iterable["StreamAccessLogsMessage"], - ], - ) -> "StreamAccessLogsResponse": - - return await self._stream_unary( - "/envoy.service.accesslog.v2.AccessLogService/StreamAccessLogs", - request_iterator, - StreamAccessLogsMessage, - StreamAccessLogsResponse, - ) - - -class AccessLogServiceBase(ServiceBase): - async def stream_access_logs( - self, request_iterator: AsyncIterator["StreamAccessLogsMessage"] - ) -> "StreamAccessLogsResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_access_logs(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - response = await self.stream_access_logs(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.accesslog.v2.AccessLogService/StreamAccessLogs": grpclib.const.Handler( - self.__rpc_stream_access_logs, - grpclib.const.Cardinality.STREAM_UNARY, - StreamAccessLogsMessage, - StreamAccessLogsResponse, - ), - } - - -from ....api.v2 import core as ___api_v2_core__ -from ....data.accesslog import v2 as ___data_accesslog_v2__ diff --git a/src/envoy_data_plane/envoy/service/accesslog/v3/__init__.py b/src/envoy_data_plane/envoy/service/accesslog/v3/__init__.py deleted file mode 100644 index f32943e..0000000 --- a/src/envoy_data_plane/envoy/service/accesslog/v3/__init__.py +++ /dev/null @@ -1,108 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/accesslog/v3/als.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class StreamAccessLogsResponse(betterproto.Message): - """ - Empty response for the StreamAccessLogs API. Will never be sent. See below. - """ - - pass - - -@dataclass(eq=False, repr=False) -class StreamAccessLogsMessage(betterproto.Message): - """ - Stream message for the StreamAccessLogs API. 
Envoy will open a stream to - the server and stream access logs without ever expecting a response. - """ - - # Identifier data that will only be sent in the first message on the stream. - # This is effectively structured metadata and is a performance optimization. - identifier: "StreamAccessLogsMessageIdentifier" = betterproto.message_field(1) - http_logs: "StreamAccessLogsMessageHttpAccessLogEntries" = ( - betterproto.message_field(2, group="log_entries") - ) - tcp_logs: "StreamAccessLogsMessageTcpAccessLogEntries" = betterproto.message_field( - 3, group="log_entries" - ) - - -@dataclass(eq=False, repr=False) -class StreamAccessLogsMessageIdentifier(betterproto.Message): - # The node sending the access log messages over the stream. - node: "___config_core_v3__.Node" = betterproto.message_field(1) - # The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig - # `. - log_name: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class StreamAccessLogsMessageHttpAccessLogEntries(betterproto.Message): - """Wrapper for batches of HTTP access log entries.""" - - log_entry: List[ - "___data_accesslog_v3__.HttpAccessLogEntry" - ] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class StreamAccessLogsMessageTcpAccessLogEntries(betterproto.Message): - """Wrapper for batches of TCP access log entries.""" - - log_entry: List[ - "___data_accesslog_v3__.TcpAccessLogEntry" - ] = betterproto.message_field(1) - - -class AccessLogServiceStub(betterproto.ServiceStub): - async def stream_access_logs( - self, - request_iterator: Union[ - AsyncIterable["StreamAccessLogsMessage"], - Iterable["StreamAccessLogsMessage"], - ], - ) -> "StreamAccessLogsResponse": - - return await self._stream_unary( - "/envoy.service.accesslog.v3.AccessLogService/StreamAccessLogs", - request_iterator, - StreamAccessLogsMessage, - StreamAccessLogsResponse, - ) - - -class AccessLogServiceBase(ServiceBase): - async def stream_access_logs( - self, request_iterator: AsyncIterator["StreamAccessLogsMessage"] - ) -> "StreamAccessLogsResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_access_logs(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - response = await self.stream_access_logs(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.accesslog.v3.AccessLogService/StreamAccessLogs": grpclib.const.Handler( - self.__rpc_stream_access_logs, - grpclib.const.Cardinality.STREAM_UNARY, - StreamAccessLogsMessage, - StreamAccessLogsResponse, - ), - } - - -from ....config.core import v3 as ___config_core_v3__ -from ....data.accesslog import v3 as ___data_accesslog_v3__ diff --git a/src/envoy_data_plane/envoy/service/auth/__init__.py b/src/envoy_data_plane/envoy/service/auth/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/auth/v2/__init__.py b/src/envoy_data_plane/envoy/service/auth/v2/__init__.py deleted file mode 100644 index 7a99b7b..0000000 --- a/src/envoy_data_plane/envoy/service/auth/v2/__init__.py +++ /dev/null @@ -1,236 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/service/auth/v2/attribute_context.proto, envoy/service/auth/v2/external_auth.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import datetime -from typing import Dict, List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class AttributeContext(betterproto.Message): - """ - An attribute is a piece of metadata that describes an activity on a - network. For example, the size of an HTTP request, or the status code of an - HTTP response. Each attribute has a type and a name, which is logically - defined as a proto message field of the `AttributeContext`. The - `AttributeContext` is a collection of individual attributes supported by - Envoy authorization system. [#comment: The following items are left out of - this proto Request.Auth field for jwt tokens Request.Api for api management - Origin peer that originated the request Caching Protocol request_context - return values to inject back into the filter chain peer.claims -- from - X.509 extensions Configuration - field mask to send - which return values - from request_context are copied back - which return values are copied into - request_headers] [#next-free-field: 12] - """ - - # The source of a network activity, such as starting a TCP connection. In a - # multi hop network activity, the source represents the sender of the last - # hop. - source: "AttributeContextPeer" = betterproto.message_field(1) - # The destination of a network activity, such as accepting a TCP connection. - # In a multi hop network activity, the destination represents the receiver of - # the last hop. - destination: "AttributeContextPeer" = betterproto.message_field(2) - # Represents a network request, such as an HTTP request. - request: "AttributeContextRequest" = betterproto.message_field(4) - # This is analogous to http_request.headers, however these contents will not - # be sent to the upstream server. Context_extensions provide an extension - # mechanism for sending additional information to the auth server without - # modifying the proto definition. It maps to the internal opaque context in - # the filter chain. - context_extensions: Dict[str, str] = betterproto.map_field( - 10, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - # Dynamic metadata associated with the request. - metadata_context: "___api_v2_core__.Metadata" = betterproto.message_field(11) - - -@dataclass(eq=False, repr=False) -class AttributeContextPeer(betterproto.Message): - """ - This message defines attributes for a node that handles a network request. - The node can be either a service or an application that sends, forwards, or - receives the request. Service peers should fill in the `service`, - `principal`, and `labels` as appropriate. [#next-free-field: 6] - """ - - # The address of the peer, this is typically the IP address. It can also be - # UDS path, or others. - address: "___api_v2_core__.Address" = betterproto.message_field(1) - # The canonical service name of the peer. It should be set to :ref:`the HTTP - # x-envoy-downstream-service-cluster - # ` If a more - # trusted source of the service name is available through mTLS/secure naming, - # it should be used. - service: str = betterproto.string_field(2) - # The labels associated with the peer. These could be pod labels for - # Kubernetes or tags for VMs. The source of the labels could be an X.509 - # certificate or other configuration. 
-    labels: Dict[str, str] = betterproto.map_field(
-        3, betterproto.TYPE_STRING, betterproto.TYPE_STRING
-    )
-    # The authenticated identity of this peer. For example, the identity
-    # associated with the workload such as a service account. If an X.509
-    # certificate is used to assert the identity this field should be sourced
-    # from `URI Subject Alternative Names`, `DNS Subject Alternate Names` or
-    # `Subject` in that order. The primary identity should be the principal. The
-    # principal format is issuer specific. Example: * SPIFFE format is
-    # `spiffe://trust-domain/path` * Google account format is
-    # `https://accounts.google.com/{userid}`
-    principal: str = betterproto.string_field(4)
-    # The X.509 certificate used to authenticate the identity of this peer. When
-    # present, the certificate contents are encoded in URL and PEM format.
-    certificate: str = betterproto.string_field(5)
-
-
-@dataclass(eq=False, repr=False)
-class AttributeContextRequest(betterproto.Message):
-    """Represents a network request, such as an HTTP request."""
-
-    # The timestamp when the proxy receives the first byte of the request.
-    time: datetime = betterproto.message_field(1)
-    # Represents an HTTP request or an HTTP-like request.
-    http: "AttributeContextHttpRequest" = betterproto.message_field(2)
-
-
-@dataclass(eq=False, repr=False)
-class AttributeContextHttpRequest(betterproto.Message):
-    """
-    This message defines attributes for an HTTP request. HTTP/1.x, HTTP/2, gRPC
-    are all considered as HTTP requests. [#next-free-field: 12]
-    """
-
-    # The unique ID for a request, which can be propagated to downstream systems.
-    # The ID should have low probability of collision within a single day for a
-    # specific service. For HTTP requests, it should be X-Request-ID or
-    # equivalent.
-    id: str = betterproto.string_field(1)
-    # The HTTP request method, such as `GET`, `POST`.
-    method: str = betterproto.string_field(2)
-    # The HTTP request headers. If multiple headers share the same key, they must
-    # be merged according to the HTTP spec. All header keys must be lower-cased,
-    # because HTTP header keys are case-insensitive.
-    headers: Dict[str, str] = betterproto.map_field(
-        3, betterproto.TYPE_STRING, betterproto.TYPE_STRING
-    )
-    # The request target, as it appears in the first line of the HTTP request.
-    # This includes the URL path and query-string. No decoding is performed.
-    path: str = betterproto.string_field(4)
-    # The HTTP request `Host` or `Authority` header value.
-    host: str = betterproto.string_field(5)
-    # The HTTP URL scheme, such as `http` and `https`. This is set for HTTP/2
-    # requests only. For HTTP/1.1, use "x-forwarded-proto" header value to lookup
-    # the scheme of the request.
-    scheme: str = betterproto.string_field(6)
-    # This field is always empty, and exists for compatibility reasons. The HTTP
-    # URL query is included in `path` field.
-    query: str = betterproto.string_field(7)
-    # This field is always empty, and exists for compatibility reasons. The URL
-    # fragment is not submitted as part of HTTP requests; it is unknowable.
-    fragment: str = betterproto.string_field(8)
-    # The HTTP request size in bytes. If unknown, it must be -1.
-    size: int = betterproto.int64_field(9)
-    # The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1",
-    # or "HTTP/2". See :repo:`headers.h:ProtocolStrings
-    # ` for a list of all possible values.
-    protocol: str = betterproto.string_field(10)
-    # The HTTP request body.
- body: str = betterproto.string_field(11) - - -@dataclass(eq=False, repr=False) -class CheckRequest(betterproto.Message): - # The request attributes. - attributes: "AttributeContext" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class DeniedHttpResponse(betterproto.Message): - """HTTP attributes for a denied response.""" - - # This field allows the authorization service to send a HTTP response status - # code to the downstream client other than 403 (Forbidden). - status: "___type__.HttpStatus" = betterproto.message_field(1) - # This field allows the authorization service to send HTTP response headers - # to the downstream client. Note that the `append` field in - # `HeaderValueOption` defaults to false when used in this message. - headers: List["___api_v2_core__.HeaderValueOption"] = betterproto.message_field(2) - # This field allows the authorization service to send a response body data to - # the downstream client. - body: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class OkHttpResponse(betterproto.Message): - """HTTP attributes for an ok response.""" - - # HTTP entity headers in addition to the original request headers. This - # allows the authorization service to append, to add or to override headers - # from the original request before dispatching it to the upstream. Note that - # the `append` field in `HeaderValueOption` defaults to false when used in - # this message. By setting the `append` field to `true`, the filter will - # append the correspondent header value to the matched request header. By - # leaving `append` as false, the filter will either add a new header, or - # override an existing one if there is a match. - headers: List["___api_v2_core__.HeaderValueOption"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class CheckResponse(betterproto.Message): - """Intended for gRPC and Network Authorization servers `only`.""" - - # Status `OK` allows the request. Any other status indicates the request - # should be denied. - status: "____google_rpc__.Status" = betterproto.message_field(1) - # Supplies http attributes for a denied response. - denied_response: "DeniedHttpResponse" = betterproto.message_field( - 2, group="http_response" - ) - # Supplies http attributes for an ok response. - ok_response: "OkHttpResponse" = betterproto.message_field(3, group="http_response") - - -class AuthorizationStub(betterproto.ServiceStub): - async def check(self, *, attributes: "AttributeContext" = None) -> "CheckResponse": - - request = CheckRequest() - if attributes is not None: - request.attributes = attributes - - return await self._unary_unary( - "/envoy.service.auth.v2.Authorization/Check", request, CheckResponse - ) - - -class AuthorizationBase(ServiceBase): - async def check(self, attributes: "AttributeContext") -> "CheckResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_check(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "attributes": request.attributes, - } - - response = await self.check(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.auth.v2.Authorization/Check": grpclib.const.Handler( - self.__rpc_check, - grpclib.const.Cardinality.UNARY_UNARY, - CheckRequest, - CheckResponse, - ), - } - - -from .... 
import type as ___type__ -from .....google import rpc as ____google_rpc__ -from ....api.v2 import core as ___api_v2_core__ diff --git a/src/envoy_data_plane/envoy/service/auth/v2alpha/__init__.py b/src/envoy_data_plane/envoy/service/auth/v2alpha/__init__.py deleted file mode 100644 index 0be9827..0000000 --- a/src/envoy_data_plane/envoy/service/auth/v2alpha/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/auth/v2alpha/external_auth.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Dict - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -class AuthorizationStub(betterproto.ServiceStub): - async def check( - self, *, attributes: "AttributeContext" = None - ) -> "_v2__.CheckResponse": - - request = _v2__.CheckRequest() - if attributes is not None: - request.attributes = attributes - - return await self._unary_unary( - "/envoy.service.auth.v2alpha.Authorization/Check", - request, - _v2__.CheckResponse, - ) - - -class AuthorizationBase(ServiceBase): - async def check(self, attributes: "AttributeContext") -> "_v2__.CheckResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_check(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "attributes": request.attributes, - } - - response = await self.check(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.auth.v2alpha.Authorization/Check": grpclib.const.Handler( - self.__rpc_check, - grpclib.const.Cardinality.UNARY_UNARY, - _v2__.CheckRequest, - _v2__.CheckResponse, - ), - } - - -from .. import v2 as _v2__ diff --git a/src/envoy_data_plane/envoy/service/auth/v3/__init__.py b/src/envoy_data_plane/envoy/service/auth/v3/__init__.py deleted file mode 100644 index 97b75aa..0000000 --- a/src/envoy_data_plane/envoy/service/auth/v3/__init__.py +++ /dev/null @@ -1,309 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/auth/v3/attribute_context.proto, envoy/service/auth/v3/external_auth.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import datetime -from typing import Dict, List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class AttributeContext(betterproto.Message): - """ - An attribute is a piece of metadata that describes an activity on a - network. For example, the size of an HTTP request, or the status code of an - HTTP response. Each attribute has a type and a name, which is logically - defined as a proto message field of the `AttributeContext`. The - `AttributeContext` is a collection of individual attributes supported by - Envoy authorization system. [#comment: The following items are left out of - this proto Request.Auth field for jwt tokens Request.Api for api management - Origin peer that originated the request Caching Protocol request_context - return values to inject back into the filter chain peer.claims -- from - X.509 extensions Configuration - field mask to send - which return values - from request_context are copied back - which return values are copied into - request_headers] [#next-free-field: 12] - """ - - # The source of a network activity, such as starting a TCP connection. 
In a - # multi hop network activity, the source represents the sender of the last - # hop. - source: "AttributeContextPeer" = betterproto.message_field(1) - # The destination of a network activity, such as accepting a TCP connection. - # In a multi hop network activity, the destination represents the receiver of - # the last hop. - destination: "AttributeContextPeer" = betterproto.message_field(2) - # Represents a network request, such as an HTTP request. - request: "AttributeContextRequest" = betterproto.message_field(4) - # This is analogous to http_request.headers, however these contents will not - # be sent to the upstream server. Context_extensions provide an extension - # mechanism for sending additional information to the auth server without - # modifying the proto definition. It maps to the internal opaque context in - # the filter chain. - context_extensions: Dict[str, str] = betterproto.map_field( - 10, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - # Dynamic metadata associated with the request. - metadata_context: "___config_core_v3__.Metadata" = betterproto.message_field(11) - - -@dataclass(eq=False, repr=False) -class AttributeContextPeer(betterproto.Message): - """ - This message defines attributes for a node that handles a network request. - The node can be either a service or an application that sends, forwards, or - receives the request. Service peers should fill in the `service`, - `principal`, and `labels` as appropriate. [#next-free-field: 6] - """ - - # The address of the peer, this is typically the IP address. It can also be - # UDS path, or others. - address: "___config_core_v3__.Address" = betterproto.message_field(1) - # The canonical service name of the peer. It should be set to :ref:`the HTTP - # x-envoy-downstream-service-cluster - # ` If a more - # trusted source of the service name is available through mTLS/secure naming, - # it should be used. - service: str = betterproto.string_field(2) - # The labels associated with the peer. These could be pod labels for - # Kubernetes or tags for VMs. The source of the labels could be an X.509 - # certificate or other configuration. - labels: Dict[str, str] = betterproto.map_field( - 3, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - # The authenticated identity of this peer. For example, the identity - # associated with the workload such as a service account. If an X.509 - # certificate is used to assert the identity this field should be sourced - # from `URI Subject Alternative Names`, `DNS Subject Alternate Names` or - # `Subject` in that order. The primary identity should be the principal. The - # principal format is issuer specific. Example: * SPIFFE format is - # `spiffe://trust-domain/path` * Google account format is - # `https://accounts.google.com/{userid}` - principal: str = betterproto.string_field(4) - # The X.509 certificate used to authenticate the identify of this peer. When - # present, the certificate contents are encoded in URL and PEM format. - certificate: str = betterproto.string_field(5) - - -@dataclass(eq=False, repr=False) -class AttributeContextRequest(betterproto.Message): - """Represents a network request, such as an HTTP request.""" - - # The timestamp when the proxy receives the first byte of the request. - time: datetime = betterproto.message_field(1) - # Represents an HTTP request or an HTTP-like request. 
- http: "AttributeContextHttpRequest" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class AttributeContextHttpRequest(betterproto.Message): - """ - This message defines attributes for an HTTP request. HTTP/1.x, HTTP/2, gRPC - are all considered as HTTP requests. [#next-free-field: 13] - """ - - # The unique ID for a request, which can be propagated to downstream systems. - # The ID should have low probability of collision within a single day for a - # specific service. For HTTP requests, it should be X-Request-ID or - # equivalent. - id: str = betterproto.string_field(1) - # The HTTP request method, such as `GET`, `POST`. - method: str = betterproto.string_field(2) - # The HTTP request headers. If multiple headers share the same key, they must - # be merged according to the HTTP spec. All header keys must be lower-cased, - # because HTTP header keys are case-insensitive. - headers: Dict[str, str] = betterproto.map_field( - 3, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - # The request target, as it appears in the first line of the HTTP request. - # This includes the URL path and query-string. No decoding is performed. - path: str = betterproto.string_field(4) - # The HTTP request `Host` or 'Authority` header value. - host: str = betterproto.string_field(5) - # The HTTP URL scheme, such as `http` and `https`. - scheme: str = betterproto.string_field(6) - # This field is always empty, and exists for compatibility reasons. The HTTP - # URL query is included in `path` field. - query: str = betterproto.string_field(7) - # This field is always empty, and exists for compatibility reasons. The URL - # fragment is not submitted as part of HTTP requests; it is unknowable. - fragment: str = betterproto.string_field(8) - # The HTTP request size in bytes. If unknown, it must be -1. - size: int = betterproto.int64_field(9) - # The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", - # or "HTTP/2". See :repo:`headers.h:ProtocolStrings - # ` for a list of all possible values. - protocol: str = betterproto.string_field(10) - # The HTTP request body. - body: str = betterproto.string_field(11) - # The HTTP request body in bytes. This is used instead of :ref:`body - # ` - # when :ref:`pack_as_bytes ` is set to true. - raw_body: bytes = betterproto.bytes_field(12) - - -@dataclass(eq=False, repr=False) -class CheckRequest(betterproto.Message): - # The request attributes. - attributes: "AttributeContext" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class DeniedHttpResponse(betterproto.Message): - """HTTP attributes for a denied response.""" - - # This field allows the authorization service to send an HTTP response status - # code to the downstream client. If not set, Envoy sends ``403 Forbidden`` - # HTTP status code by default. - status: "___type_v3__.HttpStatus" = betterproto.message_field(1) - # This field allows the authorization service to send HTTP response headers - # to the downstream client. Note that the :ref:`append field in - # HeaderValueOption - # ` defaults to - # false when used in this message. - headers: List["___config_core_v3__.HeaderValueOption"] = betterproto.message_field( - 2 - ) - # This field allows the authorization service to send a response body data to - # the downstream client. - body: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class OkHttpResponse(betterproto.Message): - """HTTP attributes for an OK response. 
[#next-free-field: 9]""" - - # HTTP entity headers in addition to the original request headers. This - # allows the authorization service to append, to add or to override headers - # from the original request before dispatching it to the upstream. Note that - # the :ref:`append field in HeaderValueOption - # ` defaults to - # false when used in this message. By setting the `append` field to `true`, - # the filter will append the correspondent header value to the matched - # request header. By leaving `append` as false, the filter will either add a - # new header, or override an existing one if there is a match. - headers: List["___config_core_v3__.HeaderValueOption"] = betterproto.message_field( - 2 - ) - # HTTP entity headers to remove from the original request before dispatching - # it to the upstream. This allows the authorization service to act on auth - # related headers (like `Authorization`), process them, and consume them. - # Under this model, the upstream will either receive the request (if it's - # authorized) or not receive it (if it's not), but will not see headers - # containing authorization credentials. Pseudo headers (such as `:authority`, - # `:method`, `:path` etc), as well as the header `Host`, may not be removed - # as that would make the request malformed. If mentioned in - # `headers_to_remove` these special headers will be ignored. When using the - # HTTP service this must instead be set by the HTTP authorization service as - # a comma separated list like so: ``x-envoy-auth-headers-to-remove: one-auth- - # header, another-auth-header``. - headers_to_remove: List[str] = betterproto.string_field(5) - # This field has been deprecated in favor of - # :ref:`CheckResponse.dynamic_metadata - # `. Until - # it is removed, setting this field overrides - # :ref:`CheckResponse.dynamic_metadata - # `. - dynamic_metadata: "betterproto_lib_google_protobuf.Struct" = ( - betterproto.message_field(3) - ) - # This field allows the authorization service to send HTTP response headers - # to the downstream client on success. Note that the :ref:`append field in - # HeaderValueOption - # ` defaults to - # false when used in this message. - response_headers_to_add: List[ - "___config_core_v3__.HeaderValueOption" - ] = betterproto.message_field(6) - # This field allows the authorization service to set (and overwrite) query - # string parameters on the original request before it is sent upstream. - query_parameters_to_set: List[ - "___config_core_v3__.QueryParameter" - ] = betterproto.message_field(7) - # This field allows the authorization service to specify which query - # parameters should be removed from the original request before it is sent - # upstream. Each element in this list is a case-sensitive query parameter - # name to be removed. - query_parameters_to_remove: List[str] = betterproto.string_field(8) - - def __post_init__(self) -> None: - super().__post_init__() - if self.dynamic_metadata: - warnings.warn( - "OkHttpResponse.dynamic_metadata is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class CheckResponse(betterproto.Message): - """Intended for gRPC and Network Authorization servers `only`.""" - - # Status `OK` allows the request. Any other status indicates the request - # should be denied, and for HTTP filter, if not overridden by :ref:`denied - # HTTP response status - # ` Envoy sends - # ``403 Forbidden`` HTTP status code by default. 
- status: "____google_rpc__.Status" = betterproto.message_field(1) - # Supplies http attributes for a denied response. - denied_response: "DeniedHttpResponse" = betterproto.message_field( - 2, group="http_response" - ) - # Supplies http attributes for an ok response. - ok_response: "OkHttpResponse" = betterproto.message_field(3, group="http_response") - # Optional response metadata that will be emitted as dynamic metadata to be - # consumed by the next filter. This metadata lives in a namespace specified - # by the canonical name of extension filter that requires it: - - # :ref:`envoy.filters.http.ext_authz - # ` for HTTP filter. - - # :ref:`envoy.filters.network.ext_authz - # ` for network filter. - dynamic_metadata: "betterproto_lib_google_protobuf.Struct" = ( - betterproto.message_field(4) - ) - - -class AuthorizationStub(betterproto.ServiceStub): - async def check(self, *, attributes: "AttributeContext" = None) -> "CheckResponse": - - request = CheckRequest() - if attributes is not None: - request.attributes = attributes - - return await self._unary_unary( - "/envoy.service.auth.v3.Authorization/Check", request, CheckResponse - ) - - -class AuthorizationBase(ServiceBase): - async def check(self, attributes: "AttributeContext") -> "CheckResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_check(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "attributes": request.attributes, - } - - response = await self.check(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.auth.v3.Authorization/Check": grpclib.const.Handler( - self.__rpc_check, - grpclib.const.Cardinality.UNARY_UNARY, - CheckRequest, - CheckResponse, - ), - } - - -from .....google import rpc as ____google_rpc__ -from ....config.core import v3 as ___config_core_v3__ -from ....type import v3 as ___type_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/service/cluster/__init__.py b/src/envoy_data_plane/envoy/service/cluster/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/cluster/v3/__init__.py b/src/envoy_data_plane/envoy/service/cluster/v3/__init__.py deleted file mode 100644 index 1aa13f2..0000000 --- a/src/envoy_data_plane/envoy/service/cluster/v3/__init__.py +++ /dev/null @@ -1,163 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/cluster/v3/cds.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Optional, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class CdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 and - protoxform to upgrade the file. 
- """ - - pass - - -class ClusterDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_clusters( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DiscoveryRequest"], - Iterable["__discovery_v3__.DiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.cluster.v3.ClusterDiscoveryService/StreamClusters", - request_iterator, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ): - yield response - - async def delta_clusters( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DeltaDiscoveryRequest"], - Iterable["__discovery_v3__.DeltaDiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.cluster.v3.ClusterDiscoveryService/DeltaClusters", - request_iterator, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ): - yield response - - async def fetch_clusters( - self, - *, - version_info: str = "", - node: "___config_core_v3__.Node" = None, - resource_names: Optional[List[str]] = None, - type_url: str = "", - response_nonce: str = "", - error_detail: "____google_rpc__.Status" = None - ) -> "__discovery_v3__.DiscoveryResponse": - resource_names = resource_names or [] - - request = __discovery_v3__.DiscoveryRequest() - request.version_info = version_info - if node is not None: - request.node = node - request.resource_names = resource_names - request.type_url = type_url - request.response_nonce = response_nonce - if error_detail is not None: - request.error_detail = error_detail - - return await self._unary_unary( - "/envoy.service.cluster.v3.ClusterDiscoveryService/FetchClusters", - request, - __discovery_v3__.DiscoveryResponse, - ) - - -class ClusterDiscoveryServiceBase(ServiceBase): - async def stream_clusters( - self, request_iterator: AsyncIterator["__discovery_v3__.DiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def delta_clusters( - self, request_iterator: AsyncIterator["__discovery_v3__.DeltaDiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_clusters( - self, - version_info: str, - node: "___config_core_v3__.Node", - resource_names: Optional[List[str]], - type_url: str, - response_nonce: str, - error_detail: "____google_rpc__.Status", - ) -> "__discovery_v3__.DiscoveryResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_clusters(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_clusters, - stream, - request_kwargs, - ) - - async def __rpc_delta_clusters(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_clusters, - stream, - request_kwargs, - ) - - async def __rpc_fetch_clusters(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "version_info": request.version_info, - "node": request.node, - "resource_names": request.resource_names, - "type_url": request.type_url, - "response_nonce": request.response_nonce, - "error_detail": request.error_detail, 
- } - - response = await self.fetch_clusters(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.cluster.v3.ClusterDiscoveryService/StreamClusters": grpclib.const.Handler( - self.__rpc_stream_clusters, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - "/envoy.service.cluster.v3.ClusterDiscoveryService/DeltaClusters": grpclib.const.Handler( - self.__rpc_delta_clusters, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ), - "/envoy.service.cluster.v3.ClusterDiscoveryService/FetchClusters": grpclib.const.Handler( - self.__rpc_fetch_clusters, - grpclib.const.Cardinality.UNARY_UNARY, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - } - - -from ...discovery import v3 as __discovery_v3__ diff --git a/src/envoy_data_plane/envoy/service/discovery/__init__.py b/src/envoy_data_plane/envoy/service/discovery/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/discovery/v2/__init__.py b/src/envoy_data_plane/envoy/service/discovery/v2/__init__.py deleted file mode 100644 index be8bf43..0000000 --- a/src/envoy_data_plane/envoy/service/discovery/v2/__init__.py +++ /dev/null @@ -1,581 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/discovery/v2/ads.proto, envoy/service/discovery/v2/hds.proto, envoy/service/discovery/v2/rtds.proto, envoy/service/discovery/v2/sds.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Optional, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -class CapabilityProtocol(betterproto.Enum): - HTTP = 0 - TCP = 1 - REDIS = 2 - - -@dataclass(eq=False, repr=False) -class AdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 - """ - - pass - - -@dataclass(eq=False, repr=False) -class Capability(betterproto.Message): - """ - Defines supported protocols etc, so the management server can assign proper - endpoints to healthcheck. 
- """ - - health_check_protocols: List["CapabilityProtocol"] = betterproto.enum_field(1) - - -@dataclass(eq=False, repr=False) -class HealthCheckRequest(betterproto.Message): - node: "___api_v2_core__.Node" = betterproto.message_field(1) - capability: "Capability" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class EndpointHealth(betterproto.Message): - endpoint: "___api_v2_endpoint__.Endpoint" = betterproto.message_field(1) - health_status: "___api_v2_core__.HealthStatus" = betterproto.enum_field(2) - - -@dataclass(eq=False, repr=False) -class EndpointHealthResponse(betterproto.Message): - endpoints_health: List["EndpointHealth"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class HealthCheckRequestOrEndpointHealthResponse(betterproto.Message): - health_check_request: "HealthCheckRequest" = betterproto.message_field( - 1, group="request_type" - ) - endpoint_health_response: "EndpointHealthResponse" = betterproto.message_field( - 2, group="request_type" - ) - - -@dataclass(eq=False, repr=False) -class LocalityEndpoints(betterproto.Message): - locality: "___api_v2_core__.Locality" = betterproto.message_field(1) - endpoints: List["___api_v2_endpoint__.Endpoint"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ClusterHealthCheck(betterproto.Message): - """ - The cluster name and locality is provided to Envoy for the endpoints that - it health checks to support statistics reporting, logging and debugging by - the Envoy instance (outside of HDS). For maximum usefulness, it should - match the same cluster structure as that provided by EDS. - """ - - cluster_name: str = betterproto.string_field(1) - health_checks: List["___api_v2_core__.HealthCheck"] = betterproto.message_field(2) - locality_endpoints: List["LocalityEndpoints"] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class HealthCheckSpecifier(betterproto.Message): - cluster_health_checks: List["ClusterHealthCheck"] = betterproto.message_field(1) - # The default is 1 second. - interval: timedelta = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class SdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 - """ - - pass - - -@dataclass(eq=False, repr=False) -class RtdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 - """ - - pass - - -@dataclass(eq=False, repr=False) -class Runtime(betterproto.Message): - """ - RTDS resource type. This describes a layer in the runtime virtual - filesystem. - """ - - # Runtime resource name. This makes the Runtime a self-describing xDS - # resource. 
- name: str = betterproto.string_field(1) - layer: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(2) - - -class AggregatedDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_aggregated_resources( - self, - request_iterator: Union[ - AsyncIterable["___api_v2__.DiscoveryRequest"], - Iterable["___api_v2__.DiscoveryRequest"], - ], - ) -> AsyncIterator["___api_v2__.DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.discovery.v2.AggregatedDiscoveryService/StreamAggregatedResources", - request_iterator, - ___api_v2__.DiscoveryRequest, - ___api_v2__.DiscoveryResponse, - ): - yield response - - async def delta_aggregated_resources( - self, - request_iterator: Union[ - AsyncIterable["___api_v2__.DeltaDiscoveryRequest"], - Iterable["___api_v2__.DeltaDiscoveryRequest"], - ], - ) -> AsyncIterator["___api_v2__.DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.discovery.v2.AggregatedDiscoveryService/DeltaAggregatedResources", - request_iterator, - ___api_v2__.DeltaDiscoveryRequest, - ___api_v2__.DeltaDiscoveryResponse, - ): - yield response - - -class HealthDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_health_check( - self, - request_iterator: Union[ - AsyncIterable["HealthCheckRequestOrEndpointHealthResponse"], - Iterable["HealthCheckRequestOrEndpointHealthResponse"], - ], - ) -> AsyncIterator["HealthCheckSpecifier"]: - - async for response in self._stream_stream( - "/envoy.service.discovery.v2.HealthDiscoveryService/StreamHealthCheck", - request_iterator, - HealthCheckRequestOrEndpointHealthResponse, - HealthCheckSpecifier, - ): - yield response - - async def fetch_health_check( - self, - *, - health_check_request: "HealthCheckRequest" = None, - endpoint_health_response: "EndpointHealthResponse" = None - ) -> "HealthCheckSpecifier": - - request = HealthCheckRequestOrEndpointHealthResponse() - if health_check_request is not None: - request.health_check_request = health_check_request - if endpoint_health_response is not None: - request.endpoint_health_response = endpoint_health_response - - return await self._unary_unary( - "/envoy.service.discovery.v2.HealthDiscoveryService/FetchHealthCheck", - request, - HealthCheckSpecifier, - ) - - -class SecretDiscoveryServiceStub(betterproto.ServiceStub): - async def delta_secrets( - self, - request_iterator: Union[ - AsyncIterable["___api_v2__.DeltaDiscoveryRequest"], - Iterable["___api_v2__.DeltaDiscoveryRequest"], - ], - ) -> AsyncIterator["___api_v2__.DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.discovery.v2.SecretDiscoveryService/DeltaSecrets", - request_iterator, - ___api_v2__.DeltaDiscoveryRequest, - ___api_v2__.DeltaDiscoveryResponse, - ): - yield response - - async def stream_secrets( - self, - request_iterator: Union[ - AsyncIterable["___api_v2__.DiscoveryRequest"], - Iterable["___api_v2__.DiscoveryRequest"], - ], - ) -> AsyncIterator["___api_v2__.DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.discovery.v2.SecretDiscoveryService/StreamSecrets", - request_iterator, - ___api_v2__.DiscoveryRequest, - ___api_v2__.DiscoveryResponse, - ): - yield response - - async def fetch_secrets( - self, - *, - version_info: str = "", - node: "core.Node" = None, - resource_names: Optional[List[str]] = None, - type_url: str = "", - response_nonce: str = "", - error_detail: "___google_rpc__.Status" = None - ) -> "___api_v2__.DiscoveryResponse": - resource_names = 
resource_names or [] - - request = ___api_v2__.DiscoveryRequest() - request.version_info = version_info - if node is not None: - request.node = node - request.resource_names = resource_names - request.type_url = type_url - request.response_nonce = response_nonce - if error_detail is not None: - request.error_detail = error_detail - - return await self._unary_unary( - "/envoy.service.discovery.v2.SecretDiscoveryService/FetchSecrets", - request, - ___api_v2__.DiscoveryResponse, - ) - - -class RuntimeDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_runtime( - self, - request_iterator: Union[ - AsyncIterable["___api_v2__.DiscoveryRequest"], - Iterable["___api_v2__.DiscoveryRequest"], - ], - ) -> AsyncIterator["___api_v2__.DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.discovery.v2.RuntimeDiscoveryService/StreamRuntime", - request_iterator, - ___api_v2__.DiscoveryRequest, - ___api_v2__.DiscoveryResponse, - ): - yield response - - async def delta_runtime( - self, - request_iterator: Union[ - AsyncIterable["___api_v2__.DeltaDiscoveryRequest"], - Iterable["___api_v2__.DeltaDiscoveryRequest"], - ], - ) -> AsyncIterator["___api_v2__.DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.discovery.v2.RuntimeDiscoveryService/DeltaRuntime", - request_iterator, - ___api_v2__.DeltaDiscoveryRequest, - ___api_v2__.DeltaDiscoveryResponse, - ): - yield response - - async def fetch_runtime( - self, - *, - version_info: str = "", - node: "core.Node" = None, - resource_names: Optional[List[str]] = None, - type_url: str = "", - response_nonce: str = "", - error_detail: "___google_rpc__.Status" = None - ) -> "___api_v2__.DiscoveryResponse": - resource_names = resource_names or [] - - request = ___api_v2__.DiscoveryRequest() - request.version_info = version_info - if node is not None: - request.node = node - request.resource_names = resource_names - request.type_url = type_url - request.response_nonce = response_nonce - if error_detail is not None: - request.error_detail = error_detail - - return await self._unary_unary( - "/envoy.service.discovery.v2.RuntimeDiscoveryService/FetchRuntime", - request, - ___api_v2__.DiscoveryResponse, - ) - - -class AggregatedDiscoveryServiceBase(ServiceBase): - async def stream_aggregated_resources( - self, request_iterator: AsyncIterator["___api_v2__.DiscoveryRequest"] - ) -> AsyncIterator["___api_v2__.DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def delta_aggregated_resources( - self, request_iterator: AsyncIterator["___api_v2__.DeltaDiscoveryRequest"] - ) -> AsyncIterator["___api_v2__.DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_aggregated_resources( - self, stream: grpclib.server.Stream - ) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_aggregated_resources, - stream, - request_kwargs, - ) - - async def __rpc_delta_aggregated_resources( - self, stream: grpclib.server.Stream - ) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_aggregated_resources, - stream, - request_kwargs, - ) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.discovery.v2.AggregatedDiscoveryService/StreamAggregatedResources": grpclib.const.Handler( - self.__rpc_stream_aggregated_resources, - 
grpclib.const.Cardinality.STREAM_STREAM, - ___api_v2__.DiscoveryRequest, - ___api_v2__.DiscoveryResponse, - ), - "/envoy.service.discovery.v2.AggregatedDiscoveryService/DeltaAggregatedResources": grpclib.const.Handler( - self.__rpc_delta_aggregated_resources, - grpclib.const.Cardinality.STREAM_STREAM, - ___api_v2__.DeltaDiscoveryRequest, - ___api_v2__.DeltaDiscoveryResponse, - ), - } - - -class HealthDiscoveryServiceBase(ServiceBase): - async def stream_health_check( - self, - request_iterator: AsyncIterator["HealthCheckRequestOrEndpointHealthResponse"], - ) -> AsyncIterator["HealthCheckSpecifier"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_health_check( - self, - health_check_request: "HealthCheckRequest", - endpoint_health_response: "EndpointHealthResponse", - ) -> "HealthCheckSpecifier": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_health_check(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_health_check, - stream, - request_kwargs, - ) - - async def __rpc_fetch_health_check(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "health_check_request": request.health_check_request, - "endpoint_health_response": request.endpoint_health_response, - } - - response = await self.fetch_health_check(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.discovery.v2.HealthDiscoveryService/StreamHealthCheck": grpclib.const.Handler( - self.__rpc_stream_health_check, - grpclib.const.Cardinality.STREAM_STREAM, - HealthCheckRequestOrEndpointHealthResponse, - HealthCheckSpecifier, - ), - "/envoy.service.discovery.v2.HealthDiscoveryService/FetchHealthCheck": grpclib.const.Handler( - self.__rpc_fetch_health_check, - grpclib.const.Cardinality.UNARY_UNARY, - HealthCheckRequestOrEndpointHealthResponse, - HealthCheckSpecifier, - ), - } - - -class SecretDiscoveryServiceBase(ServiceBase): - async def delta_secrets( - self, request_iterator: AsyncIterator["___api_v2__.DeltaDiscoveryRequest"] - ) -> AsyncIterator["___api_v2__.DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def stream_secrets( - self, request_iterator: AsyncIterator["___api_v2__.DiscoveryRequest"] - ) -> AsyncIterator["___api_v2__.DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_secrets( - self, - version_info: str, - node: "core.Node", - resource_names: Optional[List[str]], - type_url: str, - response_nonce: str, - error_detail: "___google_rpc__.Status", - ) -> "___api_v2__.DiscoveryResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_delta_secrets(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_secrets, - stream, - request_kwargs, - ) - - async def __rpc_stream_secrets(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_secrets, - stream, - request_kwargs, - ) - - async def __rpc_fetch_secrets(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "version_info": 
request.version_info, - "node": request.node, - "resource_names": request.resource_names, - "type_url": request.type_url, - "response_nonce": request.response_nonce, - "error_detail": request.error_detail, - } - - response = await self.fetch_secrets(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.discovery.v2.SecretDiscoveryService/DeltaSecrets": grpclib.const.Handler( - self.__rpc_delta_secrets, - grpclib.const.Cardinality.STREAM_STREAM, - ___api_v2__.DeltaDiscoveryRequest, - ___api_v2__.DeltaDiscoveryResponse, - ), - "/envoy.service.discovery.v2.SecretDiscoveryService/StreamSecrets": grpclib.const.Handler( - self.__rpc_stream_secrets, - grpclib.const.Cardinality.STREAM_STREAM, - ___api_v2__.DiscoveryRequest, - ___api_v2__.DiscoveryResponse, - ), - "/envoy.service.discovery.v2.SecretDiscoveryService/FetchSecrets": grpclib.const.Handler( - self.__rpc_fetch_secrets, - grpclib.const.Cardinality.UNARY_UNARY, - ___api_v2__.DiscoveryRequest, - ___api_v2__.DiscoveryResponse, - ), - } - - -class RuntimeDiscoveryServiceBase(ServiceBase): - async def stream_runtime( - self, request_iterator: AsyncIterator["___api_v2__.DiscoveryRequest"] - ) -> AsyncIterator["___api_v2__.DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def delta_runtime( - self, request_iterator: AsyncIterator["___api_v2__.DeltaDiscoveryRequest"] - ) -> AsyncIterator["___api_v2__.DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_runtime( - self, - version_info: str, - node: "core.Node", - resource_names: Optional[List[str]], - type_url: str, - response_nonce: str, - error_detail: "___google_rpc__.Status", - ) -> "___api_v2__.DiscoveryResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_runtime(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_runtime, - stream, - request_kwargs, - ) - - async def __rpc_delta_runtime(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_runtime, - stream, - request_kwargs, - ) - - async def __rpc_fetch_runtime(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "version_info": request.version_info, - "node": request.node, - "resource_names": request.resource_names, - "type_url": request.type_url, - "response_nonce": request.response_nonce, - "error_detail": request.error_detail, - } - - response = await self.fetch_runtime(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.discovery.v2.RuntimeDiscoveryService/StreamRuntime": grpclib.const.Handler( - self.__rpc_stream_runtime, - grpclib.const.Cardinality.STREAM_STREAM, - ___api_v2__.DiscoveryRequest, - ___api_v2__.DiscoveryResponse, - ), - "/envoy.service.discovery.v2.RuntimeDiscoveryService/DeltaRuntime": grpclib.const.Handler( - self.__rpc_delta_runtime, - grpclib.const.Cardinality.STREAM_STREAM, - ___api_v2__.DeltaDiscoveryRequest, - ___api_v2__.DeltaDiscoveryResponse, - ), - "/envoy.service.discovery.v2.RuntimeDiscoveryService/FetchRuntime": grpclib.const.Handler( - self.__rpc_fetch_runtime, - 
grpclib.const.Cardinality.UNARY_UNARY, - ___api_v2__.DiscoveryRequest, - ___api_v2__.DiscoveryResponse, - ), - } - - -from ....api import v2 as ___api_v2__ -from ....api.v2 import core as ___api_v2_core__ -from ....api.v2 import endpoint as ___api_v2_endpoint__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/service/discovery/v3/__init__.py b/src/envoy_data_plane/envoy/service/discovery/v3/__init__.py deleted file mode 100644 index 200708a..0000000 --- a/src/envoy_data_plane/envoy/service/discovery/v3/__init__.py +++ /dev/null @@ -1,335 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/discovery/v3/ads.proto, envoy/service/discovery/v3/discovery.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class DiscoveryRequest(betterproto.Message): - """ - A DiscoveryRequest requests a set of versioned resources of the same type - for a given Envoy node on some API. [#next-free-field: 7] - """ - - # The version_info provided in the request messages will be the version_info - # received with the most recent successfully processed response or empty on - # the first request. It is expected that no new request is sent after a - # response is received until the Envoy instance is ready to ACK/NACK the new - # configuration. ACK/NACK takes place by returning the new API config version - # as applied or the previous API config version respectively. Each type_url - # (see below) has an independent version associated with it. - version_info: str = betterproto.string_field(1) - # The node making the request. - node: "___config_core_v3__.Node" = betterproto.message_field(2) - # List of resources to subscribe to, e.g. list of cluster names or a route - # configuration name. If this is empty, all resources for the API are - # returned. LDS/CDS may have empty resource_names, which will cause all - # resources for the Envoy instance to be returned. The LDS and CDS responses - # will then imply a number of resources that need to be fetched via EDS/RDS, - # which will be explicitly enumerated in resource_names. - resource_names: List[str] = betterproto.string_field(3) - # Type of the resource that is being requested, e.g. - # "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit - # in requests made via singleton xDS APIs such as CDS, LDS, etc. but is - # required for ADS. - type_url: str = betterproto.string_field(4) - # nonce corresponding to DiscoveryResponse being ACK/NACKed. See above - # discussion on version_info and the DiscoveryResponse nonce comment. This - # may be empty only if 1) this is a non-persistent-stream xDS such as HTTP, - # or 2) the client has not yet accepted an update in this xDS stream (unlike - # delta, where it is populated only for new explicit ACKs). - response_nonce: str = betterproto.string_field(5) - # This is populated when the previous :ref:`DiscoveryResponse - # ` failed to update - # configuration. The *message* field in *error_details* provides the Envoy - # internal exception related to the failure. It is only intended for - # consumption during manual debugging, the string provided is not guaranteed - # to be stable across Envoy versions. 
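# A minimal client sketch (not from the deleted sources) for the unary Fetch*
# helpers generated above, which build a DiscoveryRequest from keyword arguments
# and issue a single call. Host, port, the node id, and the runtime layer name
# are illustrative assumptions; import paths follow this package's layout.
import asyncio

from grpclib.client import Channel

from envoy_data_plane.envoy.api.v2.core import Node
from envoy_data_plane.envoy.service.discovery.v2 import RuntimeDiscoveryServiceStub


async def main() -> None:
    channel = Channel("127.0.0.1", 18000)
    try:
        stub = RuntimeDiscoveryServiceStub(channel)
        response = await stub.fetch_runtime(
            node=Node(id="sidecar-1"),
            resource_names=["runtime-layer-0"],
            # Resource type URL shown for illustration only.
            type_url="type.googleapis.com/envoy.service.discovery.v2.Runtime",
        )
        print(response.version_info, len(response.resources))
    finally:
        channel.close()


asyncio.run(main())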
- error_detail: "____google_rpc__.Status" = betterproto.message_field(6) - - -@dataclass(eq=False, repr=False) -class DiscoveryResponse(betterproto.Message): - """[#next-free-field: 7]""" - - # The version of the response data. - version_info: str = betterproto.string_field(1) - # The response resources. These resources are typed and depend on the API - # being called. - resources: List["betterproto_lib_google_protobuf.Any"] = betterproto.message_field( - 2 - ) - # [#not-implemented-hide:] Canary is used to support two Envoy command line - # flags: * --terminate-on-canary-transition-failure. When set, Envoy is able - # to terminate if it detects that configuration is stuck at canary. - # Consider this example sequence of updates: - Management server applies - # a canary config successfully. - Management server rolls back to a - # production config. - Envoy rejects the new production config. Since - # there is no sensible way to continue receiving configuration updates, - # Envoy will then terminate and apply production config from a clean slate. - # * --dry-run-canary. When set, a canary response will never be applied, only - # validated via a dry run. - canary: bool = betterproto.bool_field(3) - # Type URL for resources. Identifies the xDS API when muxing over ADS. Must - # be consistent with the type_url in the 'resources' repeated Any (if non- - # empty). - type_url: str = betterproto.string_field(4) - # For gRPC based subscriptions, the nonce provides a way to explicitly ack a - # specific DiscoveryResponse in a following DiscoveryRequest. Additional - # messages may have been sent by Envoy to the management server for the - # previous version on the stream prior to this DiscoveryResponse, that were - # unprocessed at response send time. The nonce allows the management server - # to ignore any further DiscoveryRequests for the previous version until a - # DiscoveryRequest bearing the nonce. The nonce is optional and is not - # required for non-stream based xDS implementations. - nonce: str = betterproto.string_field(5) - # The control plane instance that sent the response. - control_plane: "___config_core_v3__.ControlPlane" = betterproto.message_field(6) - - -@dataclass(eq=False, repr=False) -class DeltaDiscoveryRequest(betterproto.Message): - """ - DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC - endpoint for Delta xDS. With Delta xDS, the DeltaDiscoveryResponses do not - need to include a full snapshot of the tracked resources. Instead, - DeltaDiscoveryResponses are a diff to the state of a xDS client. In Delta - XDS there are per-resource versions, which allow tracking state at the - resource granularity. An xDS Delta session is always in the context of a - gRPC bidirectional stream. This allows the xDS server to keep track of the - state of xDS clients connected to it. In Delta xDS the nonce field is - required and used to pair DeltaDiscoveryResponse to a DeltaDiscoveryRequest - ACK or NACK. Optionally, a response message level system_version_info is - present for debugging purposes only. DeltaDiscoveryRequest plays two - independent roles. Any DeltaDiscoveryRequest can be either or both of: [1] - informing the server of what resources the client has gained/lost interest - in (using resource_names_subscribe and resource_names_unsubscribe), or [2] - (N)ACKing an earlier resource update from the server (using response_nonce, - with presence of error_detail making it a NACK). 
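# A minimal sketch (not from the deleted sources) of the ACK/NACK handshake the
# field comments above describe, built with the v3 DiscoveryRequest and
# DiscoveryResponse dataclasses. Import paths are assumed from this module's own
# relative imports; the node identity and type URL are illustrative.
from envoy_data_plane.envoy.config.core.v3 import Node
from envoy_data_plane.envoy.service.discovery.v3 import DiscoveryRequest, DiscoveryResponse
from envoy_data_plane.google.rpc import Status

CDS_TYPE_URL = "type.googleapis.com/envoy.config.cluster.v3.Cluster"
node = Node(id="sidecar-1", cluster="example-cluster")  # hypothetical identity

# First request on a stream: version_info and response_nonce are empty.
initial = DiscoveryRequest(node=node, type_url=CDS_TYPE_URL)


def ack(response: DiscoveryResponse) -> DiscoveryRequest:
    # ACK: echo the newly applied version_info and the nonce being acknowledged.
    return DiscoveryRequest(
        version_info=response.version_info,
        type_url=response.type_url,
        response_nonce=response.nonce,
    )


def nack(response: DiscoveryResponse, previous_version: str, reason: str) -> DiscoveryRequest:
    # NACK: keep the previously applied version_info, echo the nonce, and attach
    # error_detail explaining why the update was rejected.
    return DiscoveryRequest(
        version_info=previous_version,
        type_url=response.type_url,
        response_nonce=response.nonce,
        error_detail=Status(code=3, message=reason),  # 3 == INVALID_ARGUMENT
    )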
Additionally, the first - message (for a given type_url) of a reconnected gRPC stream has a third - role: informing the server of the resources (and their versions) that the - client already possesses, using the initial_resource_versions field. As - with state-of-the-world, when multiple resource types are multiplexed - (ADS), all requests/acknowledgments/updates are logically walled off by - type_url: a Cluster ACK exists in a completely separate world from a prior - Route NACK. In particular, initial_resource_versions being sent at the - "start" of every gRPC stream actually entails a message for each type_url, - each with its own initial_resource_versions. [#next-free-field: 8] - """ - - # The node making the request. - node: "___config_core_v3__.Node" = betterproto.message_field(1) - # Type of the resource that is being requested, e.g. - # "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This does not - # need to be set if resources are only referenced via - # *xds_resource_subscribe* and *xds_resources_unsubscribe*. - type_url: str = betterproto.string_field(2) - # DeltaDiscoveryRequests allow the client to add or remove individual - # resources to the set of tracked resources in the context of a stream. All - # resource names in the resource_names_subscribe list are added to the set of - # tracked resources and all resource names in the resource_names_unsubscribe - # list are removed from the set of tracked resources. *Unlike* state-of-the- - # world xDS, an empty resource_names_subscribe or resource_names_unsubscribe - # list simply means that no resources are to be added or removed to the - # resource list. *Like* state-of-the-world xDS, the server must send updates - # for all tracked resources, but can also send updates for resources the - # client has not subscribed to. NOTE: the server must respond with all - # resources listed in resource_names_subscribe, even if it believes the - # client has the most recent version of them. The reason: the client may have - # dropped them, but then regained interest before it had a chance to send the - # unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. These - # two fields can be set in any DeltaDiscoveryRequest, including ACKs and - # initial_resource_versions. A list of Resource names to add to the list of - # tracked resources. - resource_names_subscribe: List[str] = betterproto.string_field(3) - # A list of Resource names to remove from the list of tracked resources. - resource_names_unsubscribe: List[str] = betterproto.string_field(4) - # Informs the server of the versions of the resources the xDS client knows - # of, to enable the client to continue the same logical xDS session even in - # the face of gRPC stream reconnection. It will not be populated: [1] in the - # very first stream of a session, since the client will not yet have any - # resources, [2] in any message after the first in a stream (for a given - # type_url), since the server will already be correctly tracking the client's - # state. (In ADS, the first message *of each type_url* of a reconnected - # stream populates this map.) The map's keys are names of xDS resources known - # to the xDS client. The map's values are opaque resource versions. 
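# A hedged sketch of the delta-xDS semantics described above: the subscribe and
# unsubscribe lists adjust the tracked set, while a NACK only needs the nonce of
# the rejected response plus error_detail (fields documented in this message).
# Resource names, node id, and import paths are assumptions.
from envoy_data_plane.envoy.config.core.v3 import Node
from envoy_data_plane.envoy.service.discovery.v3 import DeltaDiscoveryRequest
from envoy_data_plane.google.rpc import Status

CLA_TYPE_URL = "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment"

# Start tracking two endpoint resources and stop tracking a third.
subscribe = DeltaDiscoveryRequest(
    node=Node(id="sidecar-1"),
    type_url=CLA_TYPE_URL,
    resource_names_subscribe=["backend-a", "backend-b"],
    resource_names_unsubscribe=["backend-old"],
)

# NACK a previously received DeltaDiscoveryResponse: the subscription lists may
# stay empty, since empty lists mean "no change" in delta xDS.
nack = DeltaDiscoveryRequest(
    type_url=CLA_TYPE_URL,
    response_nonce="nonce-from-rejected-response",
    error_detail=Status(code=3, message="invalid endpoint weight"),
)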
- initial_resource_versions: Dict[str, str] = betterproto.map_field( - 5, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - # When the DeltaDiscoveryRequest is a ACK or NACK message in response to a - # previous DeltaDiscoveryResponse, the response_nonce must be the nonce in - # the DeltaDiscoveryResponse. Otherwise (unlike in DiscoveryRequest) - # response_nonce must be omitted. - response_nonce: str = betterproto.string_field(6) - # This is populated when the previous :ref:`DiscoveryResponse - # ` failed to update - # configuration. The *message* field in *error_details* provides the Envoy - # internal exception related to the failure. - error_detail: "____google_rpc__.Status" = betterproto.message_field(7) - - -@dataclass(eq=False, repr=False) -class DeltaDiscoveryResponse(betterproto.Message): - """[#next-free-field: 8]""" - - # The version of the response data (used for debugging). - system_version_info: str = betterproto.string_field(1) - # The response resources. These are typed resources, whose types must match - # the type_url field. - resources: List["Resource"] = betterproto.message_field(2) - # Type URL for resources. Identifies the xDS API when muxing over ADS. Must - # be consistent with the type_url in the Any within 'resources' if - # 'resources' is non-empty. - type_url: str = betterproto.string_field(4) - # Resources names of resources that have be deleted and to be removed from - # the xDS Client. Removed resources for missing resources can be ignored. - removed_resources: List[str] = betterproto.string_field(6) - # The nonce provides a way for DeltaDiscoveryRequests to uniquely reference a - # DeltaDiscoveryResponse when (N)ACKing. The nonce is required. - nonce: str = betterproto.string_field(5) - # [#not-implemented-hide:] The control plane instance that sent the response. - control_plane: "___config_core_v3__.ControlPlane" = betterproto.message_field(7) - - -@dataclass(eq=False, repr=False) -class Resource(betterproto.Message): - """[#next-free-field: 8]""" - - # The resource's name, to distinguish it from others of the same type of - # resource. - name: str = betterproto.string_field(3) - # The aliases are a list of other names that this resource can go by. - aliases: List[str] = betterproto.string_field(4) - # The resource level version. It allows xDS to track the state of individual - # resources. - version: str = betterproto.string_field(1) - # The resource being tracked. - resource: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - # Time-to-live value for the resource. For each resource, a timer is started. - # The timer is reset each time the resource is received with a new TTL. If - # the resource is received with no TTL set, the timer is removed for the - # resource. Upon expiration of the timer, the configuration for the resource - # will be removed. The TTL can be refreshed or changed by sending a response - # that doesn't change the resource version. In this case the resource field - # does not need to be populated, which allows for light-weight "heartbeat" - # updates to keep a resource with a TTL alive. The TTL feature is meant to - # support configurations that should be removed in the event of a management - # server failure. For example, the feature may be used for fault injection - # testing where the fault injection should be terminated in the event that - # Envoy loses contact with the management server. - ttl: timedelta = betterproto.message_field(6) - # Cache control properties for the resource. 
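# A control-plane-side sketch (not from the deleted sources) of the TTL
# "heartbeat" described above: re-sending a Resource with the same version and a
# ttl, but no resource body, refreshes the client's expiry timer without pushing
# a config change. The resource name, version, nonce, and 30s value are
# illustrative assumptions.
from datetime import timedelta

from envoy_data_plane.envoy.service.discovery.v3 import DeltaDiscoveryResponse, Resource

CLA_TYPE_URL = "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment"

heartbeat = DeltaDiscoveryResponse(
    type_url=CLA_TYPE_URL,
    nonce="hb-0001",
    resources=[
        Resource(
            name="backend-a",
            version="v42",              # unchanged version
            ttl=timedelta(seconds=30),  # resets the timer; `resource` stays unset
        )
    ],
)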
[#not-implemented-hide:] - cache_control: "ResourceCacheControl" = betterproto.message_field(7) - - -@dataclass(eq=False, repr=False) -class ResourceCacheControl(betterproto.Message): - """Cache control properties for the resource. [#not-implemented-hide:]""" - - # If true, xDS proxies may not cache this resource. Note that this does not - # apply to clients other than xDS proxies, which must cache resources for - # their own use, regardless of the value of this field. - do_not_cache: bool = betterproto.bool_field(1) - - -@dataclass(eq=False, repr=False) -class AdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 - """ - - pass - - -class AggregatedDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_aggregated_resources( - self, - request_iterator: Union[ - AsyncIterable["DiscoveryRequest"], Iterable["DiscoveryRequest"] - ], - ) -> AsyncIterator["DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.discovery.v3.AggregatedDiscoveryService/StreamAggregatedResources", - request_iterator, - DiscoveryRequest, - DiscoveryResponse, - ): - yield response - - async def delta_aggregated_resources( - self, - request_iterator: Union[ - AsyncIterable["DeltaDiscoveryRequest"], Iterable["DeltaDiscoveryRequest"] - ], - ) -> AsyncIterator["DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.discovery.v3.AggregatedDiscoveryService/DeltaAggregatedResources", - request_iterator, - DeltaDiscoveryRequest, - DeltaDiscoveryResponse, - ): - yield response - - -class AggregatedDiscoveryServiceBase(ServiceBase): - async def stream_aggregated_resources( - self, request_iterator: AsyncIterator["DiscoveryRequest"] - ) -> AsyncIterator["DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def delta_aggregated_resources( - self, request_iterator: AsyncIterator["DeltaDiscoveryRequest"] - ) -> AsyncIterator["DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_aggregated_resources( - self, stream: grpclib.server.Stream - ) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_aggregated_resources, - stream, - request_kwargs, - ) - - async def __rpc_delta_aggregated_resources( - self, stream: grpclib.server.Stream - ) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_aggregated_resources, - stream, - request_kwargs, - ) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.discovery.v3.AggregatedDiscoveryService/StreamAggregatedResources": grpclib.const.Handler( - self.__rpc_stream_aggregated_resources, - grpclib.const.Cardinality.STREAM_STREAM, - DiscoveryRequest, - DiscoveryResponse, - ), - "/envoy.service.discovery.v3.AggregatedDiscoveryService/DeltaAggregatedResources": grpclib.const.Handler( - self.__rpc_delta_aggregated_resources, - grpclib.const.Cardinality.STREAM_STREAM, - DeltaDiscoveryRequest, - DeltaDiscoveryResponse, - ), - } - - -from .....google import rpc as ____google_rpc__ -from ....config.core import v3 as ___config_core_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/service/endpoint/__init__.py 
b/src/envoy_data_plane/envoy/service/endpoint/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/endpoint/v3/__init__.py b/src/envoy_data_plane/envoy/service/endpoint/v3/__init__.py deleted file mode 100644 index 6cd3da4..0000000 --- a/src/envoy_data_plane/envoy/service/endpoint/v3/__init__.py +++ /dev/null @@ -1,220 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/endpoint/v3/eds.proto, envoy/service/endpoint/v3/leds.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Optional, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class LedsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 and - protoxform to upgrade the file. - """ - - pass - - -@dataclass(eq=False, repr=False) -class EdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 and - protoxform to upgrade the file. - """ - - pass - - -class LocalityEndpointDiscoveryServiceStub(betterproto.ServiceStub): - async def delta_locality_endpoints( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DeltaDiscoveryRequest"], - Iterable["__discovery_v3__.DeltaDiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.endpoint.v3.LocalityEndpointDiscoveryService/DeltaLocalityEndpoints", - request_iterator, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ): - yield response - - -class EndpointDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_endpoints( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DiscoveryRequest"], - Iterable["__discovery_v3__.DiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.endpoint.v3.EndpointDiscoveryService/StreamEndpoints", - request_iterator, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ): - yield response - - async def delta_endpoints( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DeltaDiscoveryRequest"], - Iterable["__discovery_v3__.DeltaDiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.endpoint.v3.EndpointDiscoveryService/DeltaEndpoints", - request_iterator, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ): - yield response - - async def fetch_endpoints( - self, - *, - version_info: str = "", - node: "___config_core_v3__.Node" = None, - resource_names: Optional[List[str]] = None, - type_url: str = "", - response_nonce: str = "", - error_detail: "____google_rpc__.Status" = None - ) -> "__discovery_v3__.DiscoveryResponse": - resource_names = resource_names or [] - - request = __discovery_v3__.DiscoveryRequest() - request.version_info = version_info - if node is not None: - request.node = node - request.resource_names = resource_names - request.type_url = type_url - request.response_nonce = 
response_nonce - if error_detail is not None: - request.error_detail = error_detail - - return await self._unary_unary( - "/envoy.service.endpoint.v3.EndpointDiscoveryService/FetchEndpoints", - request, - __discovery_v3__.DiscoveryResponse, - ) - - -class LocalityEndpointDiscoveryServiceBase(ServiceBase): - async def delta_locality_endpoints( - self, request_iterator: AsyncIterator["__discovery_v3__.DeltaDiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_delta_locality_endpoints( - self, stream: grpclib.server.Stream - ) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_locality_endpoints, - stream, - request_kwargs, - ) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.endpoint.v3.LocalityEndpointDiscoveryService/DeltaLocalityEndpoints": grpclib.const.Handler( - self.__rpc_delta_locality_endpoints, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ), - } - - -class EndpointDiscoveryServiceBase(ServiceBase): - async def stream_endpoints( - self, request_iterator: AsyncIterator["__discovery_v3__.DiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def delta_endpoints( - self, request_iterator: AsyncIterator["__discovery_v3__.DeltaDiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_endpoints( - self, - version_info: str, - node: "___config_core_v3__.Node", - resource_names: Optional[List[str]], - type_url: str, - response_nonce: str, - error_detail: "____google_rpc__.Status", - ) -> "__discovery_v3__.DiscoveryResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_endpoints(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_endpoints, - stream, - request_kwargs, - ) - - async def __rpc_delta_endpoints(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_endpoints, - stream, - request_kwargs, - ) - - async def __rpc_fetch_endpoints(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "version_info": request.version_info, - "node": request.node, - "resource_names": request.resource_names, - "type_url": request.type_url, - "response_nonce": request.response_nonce, - "error_detail": request.error_detail, - } - - response = await self.fetch_endpoints(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.endpoint.v3.EndpointDiscoveryService/StreamEndpoints": grpclib.const.Handler( - self.__rpc_stream_endpoints, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - "/envoy.service.endpoint.v3.EndpointDiscoveryService/DeltaEndpoints": grpclib.const.Handler( - self.__rpc_delta_endpoints, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DeltaDiscoveryRequest, - 
__discovery_v3__.DeltaDiscoveryResponse, - ), - "/envoy.service.endpoint.v3.EndpointDiscoveryService/FetchEndpoints": grpclib.const.Handler( - self.__rpc_fetch_endpoints, - grpclib.const.Cardinality.UNARY_UNARY, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - } - - -from ...discovery import v3 as __discovery_v3__ diff --git a/src/envoy_data_plane/envoy/service/event_reporting/__init__.py b/src/envoy_data_plane/envoy/service/event_reporting/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/event_reporting/v2alpha/__init__.py b/src/envoy_data_plane/envoy/service/event_reporting/v2alpha/__init__.py deleted file mode 100644 index 8583dfd..0000000 --- a/src/envoy_data_plane/envoy/service/event_reporting/v2alpha/__init__.py +++ /dev/null @@ -1,94 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/event_reporting/v2alpha/event_reporting_service.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class StreamEventsRequest(betterproto.Message): - """ - [#not-implemented-hide:] An events envoy sends to the management server. - """ - - # Identifier data that will only be sent in the first message on the stream. - # This is effectively structured metadata and is a performance optimization. - identifier: "StreamEventsRequestIdentifier" = betterproto.message_field(1) - # Batch of events. When the stream is already active, it will be the events - # occurred since the last message had been sent. If the server receives - # unknown event type, it should silently ignore it. The following events are - # supported: * :ref:`HealthCheckEvent - # ` * - # :ref:`OutlierDetectionEvent - # ` - events: List["betterproto_lib_google_protobuf.Any"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class StreamEventsRequestIdentifier(betterproto.Message): - # The node sending the event messages over the stream. - node: "___api_v2_core__.Node" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class StreamEventsResponse(betterproto.Message): - """ - [#not-implemented-hide:] The management server may send envoy a - StreamEventsResponse to tell which events the server is interested in. In - future, with aggregated event reporting service, this message will contain, - for example, clusters the envoy should send events for, or event types the - server wants to process. 
- """ - - pass - - -class EventReportingServiceStub(betterproto.ServiceStub): - async def stream_events( - self, - request_iterator: Union[ - AsyncIterable["StreamEventsRequest"], Iterable["StreamEventsRequest"] - ], - ) -> AsyncIterator["StreamEventsResponse"]: - - async for response in self._stream_stream( - "/envoy.service.event_reporting.v2alpha.EventReportingService/StreamEvents", - request_iterator, - StreamEventsRequest, - StreamEventsResponse, - ): - yield response - - -class EventReportingServiceBase(ServiceBase): - async def stream_events( - self, request_iterator: AsyncIterator["StreamEventsRequest"] - ) -> AsyncIterator["StreamEventsResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_events(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_events, - stream, - request_kwargs, - ) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.event_reporting.v2alpha.EventReportingService/StreamEvents": grpclib.const.Handler( - self.__rpc_stream_events, - grpclib.const.Cardinality.STREAM_STREAM, - StreamEventsRequest, - StreamEventsResponse, - ), - } - - -from ....api.v2 import core as ___api_v2_core__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/service/event_reporting/v3/__init__.py b/src/envoy_data_plane/envoy/service/event_reporting/v3/__init__.py deleted file mode 100644 index 6650e32..0000000 --- a/src/envoy_data_plane/envoy/service/event_reporting/v3/__init__.py +++ /dev/null @@ -1,94 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/event_reporting/v3/event_reporting_service.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class StreamEventsRequest(betterproto.Message): - """ - [#not-implemented-hide:] An events envoy sends to the management server. - """ - - # Identifier data that will only be sent in the first message on the stream. - # This is effectively structured metadata and is a performance optimization. - identifier: "StreamEventsRequestIdentifier" = betterproto.message_field(1) - # Batch of events. When the stream is already active, it will be the events - # occurred since the last message had been sent. If the server receives - # unknown event type, it should silently ignore it. The following events are - # supported: * :ref:`HealthCheckEvent - # ` * - # :ref:`OutlierDetectionEvent - # ` - events: List["betterproto_lib_google_protobuf.Any"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class StreamEventsRequestIdentifier(betterproto.Message): - # The node sending the event messages over the stream. - node: "___config_core_v3__.Node" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class StreamEventsResponse(betterproto.Message): - """ - [#not-implemented-hide:] The management server may send envoy a - StreamEventsResponse to tell which events the server is interested in. In - future, with aggregated event reporting service, this message will contain, - for example, clusters the envoy should send events for, or event types the - server wants to process. 
- """ - - pass - - -class EventReportingServiceStub(betterproto.ServiceStub): - async def stream_events( - self, - request_iterator: Union[ - AsyncIterable["StreamEventsRequest"], Iterable["StreamEventsRequest"] - ], - ) -> AsyncIterator["StreamEventsResponse"]: - - async for response in self._stream_stream( - "/envoy.service.event_reporting.v3.EventReportingService/StreamEvents", - request_iterator, - StreamEventsRequest, - StreamEventsResponse, - ): - yield response - - -class EventReportingServiceBase(ServiceBase): - async def stream_events( - self, request_iterator: AsyncIterator["StreamEventsRequest"] - ) -> AsyncIterator["StreamEventsResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_events(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_events, - stream, - request_kwargs, - ) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.event_reporting.v3.EventReportingService/StreamEvents": grpclib.const.Handler( - self.__rpc_stream_events, - grpclib.const.Cardinality.STREAM_STREAM, - StreamEventsRequest, - StreamEventsResponse, - ), - } - - -from ....config.core import v3 as ___config_core_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/service/ext_proc/__init__.py b/src/envoy_data_plane/envoy/service/ext_proc/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/ext_proc/v3/__init__.py b/src/envoy_data_plane/envoy/service/ext_proc/v3/__init__.py deleted file mode 100644 index 0c0a231..0000000 --- a/src/envoy_data_plane/envoy/service/ext_proc/v3/__init__.py +++ /dev/null @@ -1,325 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/ext_proc/v3/external_processor.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -class CommonResponseResponseStatus(betterproto.Enum): - CONTINUE = 0 - CONTINUE_AND_REPLACE = 1 - - -@dataclass(eq=False, repr=False) -class ProcessingRequest(betterproto.Message): - """ - This represents the different types of messages that Envoy can send to an - external processing server. [#next-free-field: 8] - """ - - # Specify whether the filter that sent this request is running in synchronous - # or asynchronous mode. The choice of synchronous or asynchronous mode can be - # set in the filter configuration, and defaults to false. * A value of - # "false" indicates that the server must respond to this message by either - # sending back a matching ProcessingResponse message, or by closing the - # stream. * A value of "true" indicates that the server must not respond to - # this message, although it may still close the stream to indicate that no - # more messages are needed. - async_mode: bool = betterproto.bool_field(1) - # Information about the HTTP request headers, as well as peer info and - # additional properties. Unless "async_mode" is true, the server must send - # back a HeaderResponse message, an ImmediateResponse message, or close the - # stream. 
- request_headers: "HttpHeaders" = betterproto.message_field(2, group="request") - # Information about the HTTP response headers, as well as peer info and - # additional properties. Unless "async_mode" is true, the server must send - # back a HeaderResponse message or close the stream. - response_headers: "HttpHeaders" = betterproto.message_field(3, group="request") - # A chunk of the HTTP request body. Unless "async_mode" is true, the server - # must send back a BodyResponse message, an ImmediateResponse message, or - # close the stream. - request_body: "HttpBody" = betterproto.message_field(4, group="request") - # A chunk of the HTTP request body. Unless "async_mode" is true, the server - # must send back a BodyResponse message or close the stream. - response_body: "HttpBody" = betterproto.message_field(5, group="request") - # The HTTP trailers for the request path. Unless "async_mode" is true, the - # server must send back a TrailerResponse message or close the stream. This - # message is only sent if the trailers processing mode is set to "SEND". If - # there are no trailers on the original downstream request, then this message - # will only be sent (with empty trailers waiting to be populated) if the - # processing mode is set before the request headers are sent, such as in the - # filter configuration. - request_trailers: "HttpTrailers" = betterproto.message_field(6, group="request") - # The HTTP trailers for the response path. Unless "async_mode" is true, the - # server must send back a TrailerResponse message or close the stream. This - # message is only sent if the trailers processing mode is set to "SEND". If - # there are no trailers on the original downstream request, then this message - # will only be sent (with empty trailers waiting to be populated) if the - # processing mode is set before the request headers are sent, such as in the - # filter configuration. - response_trailers: "HttpTrailers" = betterproto.message_field(7, group="request") - - -@dataclass(eq=False, repr=False) -class ProcessingResponse(betterproto.Message): - """ - For every ProcessingRequest received by the server with the "async_mode" - field set to false, the server must send back exactly one - ProcessingResponse message. [#next-free-field: 10] - """ - - # The server must send back this message in response to a message with the - # "request_headers" field set. - request_headers: "HeadersResponse" = betterproto.message_field(1, group="response") - # The server must send back this message in response to a message with the - # "response_headers" field set. - response_headers: "HeadersResponse" = betterproto.message_field(2, group="response") - # The server must send back this message in response to a message with the - # "request_body" field set. - request_body: "BodyResponse" = betterproto.message_field(3, group="response") - # The server must send back this message in response to a message with the - # "response_body" field set. - response_body: "BodyResponse" = betterproto.message_field(4, group="response") - # The server must send back this message in response to a message with the - # "request_trailers" field set. - request_trailers: "TrailersResponse" = betterproto.message_field( - 5, group="response" - ) - # The server must send back this message in response to a message with the - # "response_trailers" field set. 
- response_trailers: "TrailersResponse" = betterproto.message_field( - 6, group="response" - ) - # If specified, attempt to create a locally generated response, send it - # downstream, and stop processing additional filters and ignore any - # additional messages received from the remote server for this request or - # response. If a response has already started -- for example, if this message - # is sent response to a "response_body" message -- then this will either ship - # the reply directly to the downstream codec, or reset the stream. - immediate_response: "ImmediateResponse" = betterproto.message_field( - 7, group="response" - ) - # [#not-implemented-hide:] Optional metadata that will be emitted as dynamic - # metadata to be consumed by the next filter. This metadata will be placed in - # the namespace "envoy.filters.http.ext_proc". - dynamic_metadata: "betterproto_lib_google_protobuf.Struct" = ( - betterproto.message_field(8) - ) - # Override how parts of the HTTP request and response are processed for the - # duration of this particular request/response only. Servers may use this to - # intelligently control how requests are processed based on the headers and - # other metadata that they see. - mode_override: "___extensions_filters_http_ext_proc_v3__.ProcessingMode" = ( - betterproto.message_field(9) - ) - - -@dataclass(eq=False, repr=False) -class HttpHeaders(betterproto.Message): - """ - This message is sent to the external server when the HTTP request and - responses are first received. - """ - - # The HTTP request headers. All header keys will be lower-cased, because HTTP - # header keys are case-insensitive. - headers: "___config_core_v3__.HeaderMap" = betterproto.message_field(1) - # [#not-implemented-hide:] The values of properties selected by the - # "request_attributes" or "response_attributes" list in the configuration. - # Each entry in the list is populated from the standard :ref:`attributes - # ` supported across Envoy. - attributes: Dict[ - str, "betterproto_lib_google_protobuf.Struct" - ] = betterproto.map_field(2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE) - # If true, then there is no message body associated with this request or - # response. - end_of_stream: bool = betterproto.bool_field(3) - - -@dataclass(eq=False, repr=False) -class HttpBody(betterproto.Message): - """ - This message contains the message body that Envoy sends to the external - server. 
- """ - - body: bytes = betterproto.bytes_field(1) - end_of_stream: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class HttpTrailers(betterproto.Message): - """This message contains the trailers.""" - - trailers: "___config_core_v3__.HeaderMap" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class HeadersResponse(betterproto.Message): - """This message must be sent in response to an HttpHeaders message.""" - - response: "CommonResponse" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class TrailersResponse(betterproto.Message): - """This message must be sent in response to an HttpTrailers message.""" - - # Instructions on how to manipulate the trailers - header_mutation: "HeaderMutation" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class BodyResponse(betterproto.Message): - """This message must be sent in response to an HttpBody message.""" - - response: "CommonResponse" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class CommonResponse(betterproto.Message): - """ - This message contains common fields between header and body responses. - [#next-free-field: 6] - """ - - # If set, provide additional direction on how the Envoy proxy should handle - # the rest of the HTTP filter chain. - status: "CommonResponseResponseStatus" = betterproto.enum_field(1) - # Instructions on how to manipulate the headers. When responding to an - # HttpBody request, header mutations will only take effect if the current - # processing mode for the body is BUFFERED. - header_mutation: "HeaderMutation" = betterproto.message_field(2) - # Replace the body of the last message sent to the remote server on this - # stream. If responding to an HttpBody request, simply replace or clear the - # body chunk that was sent with that request. Body mutations only take effect - # in response to "body" messages and are ignored otherwise. - body_mutation: "BodyMutation" = betterproto.message_field(3) - # [#not-implemented-hide:] Add new trailers to the message. This may be used - # when responding to either a HttpHeaders or HttpBody message, but only if - # this message is returned along with the CONTINUE_AND_REPLACE status. - trailers: "___config_core_v3__.HeaderMap" = betterproto.message_field(4) - # Clear the route cache for the current request. This is necessary if the - # remote server modified headers that are used to calculate the route. - clear_route_cache: bool = betterproto.bool_field(5) - - -@dataclass(eq=False, repr=False) -class ImmediateResponse(betterproto.Message): - """ - This message causes the filter to attempt to create a locally generated - response, send it downstream, stop processing additional filters, and - ignore any additional messages received from the remote server for this - request or response. If a response has already started, then this will - either ship the reply directly to the downstream codec, or reset the - stream. [#next-free-field: 6] - """ - - # The response code to return - status: "___type_v3__.HttpStatus" = betterproto.message_field(1) - # Apply changes to the default headers, which will include content-type. - headers: "HeaderMutation" = betterproto.message_field(2) - # The message body to return with the response which is sent using the - # text/plain content type, or encoded in the grpc-message header. - body: str = betterproto.string_field(3) - # If set, then include a gRPC status trailer. 
- grpc_status: "GrpcStatus" = betterproto.message_field(4) - # A string detailing why this local reply was sent, which may be included in - # log and debug output. - details: str = betterproto.string_field(5) - - -@dataclass(eq=False, repr=False) -class GrpcStatus(betterproto.Message): - """ - This message specifies a gRPC status for an ImmediateResponse message. - """ - - # The actual gRPC status - status: int = betterproto.uint32_field(1) - - -@dataclass(eq=False, repr=False) -class HeaderMutation(betterproto.Message): - """ - Change HTTP headers or trailers by appending, replacing, or removing - headers. - """ - - # Add or replace HTTP headers. Attempts to set the value of any "x-envoy" - # header, and attempts to set the ":method", ":authority", ":scheme", or - # "host" headers will be ignored. - set_headers: List[ - "___config_core_v3__.HeaderValueOption" - ] = betterproto.message_field(1) - # Remove these HTTP headers. Attempts to remove system headers -- any header - # starting with ":", plus "host" -- will be ignored. - remove_headers: List[str] = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class BodyMutation(betterproto.Message): - """ - Replace the entire message body chunk received in the corresponding - HttpBody message with this new body, or clear the body. - """ - - # The entire body to replace - body: bytes = betterproto.bytes_field(1, group="mutation") - # Clear the corresponding body chunk - clear_body: bool = betterproto.bool_field(2, group="mutation") - - -class ExternalProcessorStub(betterproto.ServiceStub): - async def process( - self, - request_iterator: Union[ - AsyncIterable["ProcessingRequest"], Iterable["ProcessingRequest"] - ], - ) -> AsyncIterator["ProcessingResponse"]: - - async for response in self._stream_stream( - "/envoy.service.ext_proc.v3.ExternalProcessor/Process", - request_iterator, - ProcessingRequest, - ProcessingResponse, - ): - yield response - - -class ExternalProcessorBase(ServiceBase): - async def process( - self, request_iterator: AsyncIterator["ProcessingRequest"] - ) -> AsyncIterator["ProcessingResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_process(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.process, - stream, - request_kwargs, - ) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.ext_proc.v3.ExternalProcessor/Process": grpclib.const.Handler( - self.__rpc_process, - grpclib.const.Cardinality.STREAM_STREAM, - ProcessingRequest, - ProcessingResponse, - ), - } - - -from ....config.core import v3 as ___config_core_v3__ -from ....extensions.filters.http.ext_proc import ( - v3 as ___extensions_filters_http_ext_proc_v3__, -) -from ....type import v3 as ___type_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/service/extension/__init__.py b/src/envoy_data_plane/envoy/service/extension/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/extension/v3/__init__.py b/src/envoy_data_plane/envoy/service/extension/v3/__init__.py deleted file mode 100644 index c9d9788..0000000 --- a/src/envoy_data_plane/envoy/service/extension/v3/__init__.py +++ /dev/null @@ -1,169 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: envoy/service/extension/v3/config_discovery.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Optional, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class EcdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 and - protoxform to upgrade the file. - """ - - pass - - -class ExtensionConfigDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_extension_configs( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DiscoveryRequest"], - Iterable["__discovery_v3__.DiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.extension.v3.ExtensionConfigDiscoveryService/StreamExtensionConfigs", - request_iterator, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ): - yield response - - async def delta_extension_configs( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DeltaDiscoveryRequest"], - Iterable["__discovery_v3__.DeltaDiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.extension.v3.ExtensionConfigDiscoveryService/DeltaExtensionConfigs", - request_iterator, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ): - yield response - - async def fetch_extension_configs( - self, - *, - version_info: str = "", - node: "___config_core_v3__.Node" = None, - resource_names: Optional[List[str]] = None, - type_url: str = "", - response_nonce: str = "", - error_detail: "____google_rpc__.Status" = None - ) -> "__discovery_v3__.DiscoveryResponse": - resource_names = resource_names or [] - - request = __discovery_v3__.DiscoveryRequest() - request.version_info = version_info - if node is not None: - request.node = node - request.resource_names = resource_names - request.type_url = type_url - request.response_nonce = response_nonce - if error_detail is not None: - request.error_detail = error_detail - - return await self._unary_unary( - "/envoy.service.extension.v3.ExtensionConfigDiscoveryService/FetchExtensionConfigs", - request, - __discovery_v3__.DiscoveryResponse, - ) - - -class ExtensionConfigDiscoveryServiceBase(ServiceBase): - async def stream_extension_configs( - self, request_iterator: AsyncIterator["__discovery_v3__.DiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def delta_extension_configs( - self, request_iterator: AsyncIterator["__discovery_v3__.DeltaDiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_extension_configs( - self, - version_info: str, - node: "___config_core_v3__.Node", - resource_names: Optional[List[str]], - type_url: str, - response_nonce: str, - error_detail: "____google_rpc__.Status", - ) -> "__discovery_v3__.DiscoveryResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_extension_configs( - self, stream: grpclib.server.Stream - ) -> None: - request_kwargs = {"request_iterator": 
stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_extension_configs, - stream, - request_kwargs, - ) - - async def __rpc_delta_extension_configs( - self, stream: grpclib.server.Stream - ) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_extension_configs, - stream, - request_kwargs, - ) - - async def __rpc_fetch_extension_configs( - self, stream: grpclib.server.Stream - ) -> None: - request = await stream.recv_message() - - request_kwargs = { - "version_info": request.version_info, - "node": request.node, - "resource_names": request.resource_names, - "type_url": request.type_url, - "response_nonce": request.response_nonce, - "error_detail": request.error_detail, - } - - response = await self.fetch_extension_configs(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.extension.v3.ExtensionConfigDiscoveryService/StreamExtensionConfigs": grpclib.const.Handler( - self.__rpc_stream_extension_configs, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - "/envoy.service.extension.v3.ExtensionConfigDiscoveryService/DeltaExtensionConfigs": grpclib.const.Handler( - self.__rpc_delta_extension_configs, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ), - "/envoy.service.extension.v3.ExtensionConfigDiscoveryService/FetchExtensionConfigs": grpclib.const.Handler( - self.__rpc_fetch_extension_configs, - grpclib.const.Cardinality.UNARY_UNARY, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - } - - -from ...discovery import v3 as __discovery_v3__ diff --git a/src/envoy_data_plane/envoy/service/health/__init__.py b/src/envoy_data_plane/envoy/service/health/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/health/v3/__init__.py b/src/envoy_data_plane/envoy/service/health/v3/__init__.py deleted file mode 100644 index 0a374f3..0000000 --- a/src/envoy_data_plane/envoy/service/health/v3/__init__.py +++ /dev/null @@ -1,230 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/health/v3/hds.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import timedelta -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -class CapabilityProtocol(betterproto.Enum): - HTTP = 0 - TCP = 1 - REDIS = 2 - - -@dataclass(eq=False, repr=False) -class Capability(betterproto.Message): - """ - Defines supported protocols etc, so the management server can assign proper - endpoints to healthcheck. 
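# A hedged sketch of serving one of these generated *Base classes with grpclib,
# using the ExtensionConfigDiscoveryServiceBase above. Only the state-of-the-world
# stream is implemented; delta and fetch keep the base's UNIMPLEMENTED behaviour.
# The filter name, port, version string, and the empty typed_config are
# illustrative assumptions.
import asyncio
from typing import AsyncIterator

import betterproto.lib.google.protobuf as pb
from grpclib.server import Server

from envoy_data_plane.envoy.config.core.v3 import TypedExtensionConfig
from envoy_data_plane.envoy.service.discovery.v3 import DiscoveryRequest, DiscoveryResponse
from envoy_data_plane.envoy.service.extension.v3 import ExtensionConfigDiscoveryServiceBase

ECDS_TYPE_URL = "type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig"


class StaticEcds(ExtensionConfigDiscoveryServiceBase):
    async def stream_extension_configs(
        self, request_iterator: AsyncIterator[DiscoveryRequest]
    ) -> AsyncIterator[DiscoveryResponse]:
        async for request in request_iterator:
            configs = [
                # A real server would pack the concrete filter config into the Any
                # payload; this sketch sends a named but empty extension config.
                pb.Any(type_url=ECDS_TYPE_URL, value=bytes(TypedExtensionConfig(name=name)))
                for name in request.resource_names
            ]
            yield DiscoveryResponse(
                version_info="1", type_url=ECDS_TYPE_URL, nonce="0", resources=configs
            )


async def main() -> None:
    server = Server([StaticEcds()])
    await server.start("127.0.0.1", 18000)
    await server.wait_closed()


if __name__ == "__main__":
    asyncio.run(main())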
- """ - - health_check_protocols: List["CapabilityProtocol"] = betterproto.enum_field(1) - - -@dataclass(eq=False, repr=False) -class HealthCheckRequest(betterproto.Message): - node: "___config_core_v3__.Node" = betterproto.message_field(1) - capability: "Capability" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class EndpointHealth(betterproto.Message): - endpoint: "___config_endpoint_v3__.Endpoint" = betterproto.message_field(1) - health_status: "___config_core_v3__.HealthStatus" = betterproto.enum_field(2) - - -@dataclass(eq=False, repr=False) -class LocalityEndpointsHealth(betterproto.Message): - """Group endpoint health by locality under each cluster.""" - - locality: "___config_core_v3__.Locality" = betterproto.message_field(1) - endpoints_health: List["EndpointHealth"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ClusterEndpointsHealth(betterproto.Message): - """ - The health status of endpoints in a cluster. The cluster name and locality - should match the corresponding fields in ClusterHealthCheck message. - """ - - cluster_name: str = betterproto.string_field(1) - locality_endpoints_health: List[ - "LocalityEndpointsHealth" - ] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class EndpointHealthResponse(betterproto.Message): - # Deprecated - Flat list of endpoint health information. - endpoints_health: List["EndpointHealth"] = betterproto.message_field(1) - # Organize Endpoint health information by cluster. - cluster_endpoints_health: List[ - "ClusterEndpointsHealth" - ] = betterproto.message_field(2) - - def __post_init__(self) -> None: - super().__post_init__() - if self.endpoints_health: - warnings.warn( - "EndpointHealthResponse.endpoints_health is deprecated", - DeprecationWarning, - ) - - -@dataclass(eq=False, repr=False) -class HealthCheckRequestOrEndpointHealthResponse(betterproto.Message): - health_check_request: "HealthCheckRequest" = betterproto.message_field( - 1, group="request_type" - ) - endpoint_health_response: "EndpointHealthResponse" = betterproto.message_field( - 2, group="request_type" - ) - - -@dataclass(eq=False, repr=False) -class LocalityEndpoints(betterproto.Message): - locality: "___config_core_v3__.Locality" = betterproto.message_field(1) - endpoints: List["___config_endpoint_v3__.Endpoint"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ClusterHealthCheck(betterproto.Message): - """ - The cluster name and locality is provided to Envoy for the endpoints that - it health checks to support statistics reporting, logging and debugging by - the Envoy instance (outside of HDS). For maximum usefulness, it should - match the same cluster structure as that provided by EDS. - """ - - cluster_name: str = betterproto.string_field(1) - health_checks: List["___config_core_v3__.HealthCheck"] = betterproto.message_field( - 2 - ) - locality_endpoints: List["LocalityEndpoints"] = betterproto.message_field(3) - # Optional map that gets filtered by - # :ref:`health_checks.transport_socket_match_criteria ` on connection - # when health checking. For more details, see - # :ref:`config.cluster.v3.Cluster.transport_socket_matches - # `. - transport_socket_matches: List[ - "___config_cluster_v3__.ClusterTransportSocketMatch" - ] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class HealthCheckSpecifier(betterproto.Message): - cluster_health_checks: List["ClusterHealthCheck"] = betterproto.message_field(1) - # The default is 1 second. 
- interval: timedelta = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class HdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 and - protoxform to upgrade the file. - """ - - pass - - -class HealthDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_health_check( - self, - request_iterator: Union[ - AsyncIterable["HealthCheckRequestOrEndpointHealthResponse"], - Iterable["HealthCheckRequestOrEndpointHealthResponse"], - ], - ) -> AsyncIterator["HealthCheckSpecifier"]: - - async for response in self._stream_stream( - "/envoy.service.health.v3.HealthDiscoveryService/StreamHealthCheck", - request_iterator, - HealthCheckRequestOrEndpointHealthResponse, - HealthCheckSpecifier, - ): - yield response - - async def fetch_health_check( - self, - *, - health_check_request: "HealthCheckRequest" = None, - endpoint_health_response: "EndpointHealthResponse" = None - ) -> "HealthCheckSpecifier": - - request = HealthCheckRequestOrEndpointHealthResponse() - if health_check_request is not None: - request.health_check_request = health_check_request - if endpoint_health_response is not None: - request.endpoint_health_response = endpoint_health_response - - return await self._unary_unary( - "/envoy.service.health.v3.HealthDiscoveryService/FetchHealthCheck", - request, - HealthCheckSpecifier, - ) - - -class HealthDiscoveryServiceBase(ServiceBase): - async def stream_health_check( - self, - request_iterator: AsyncIterator["HealthCheckRequestOrEndpointHealthResponse"], - ) -> AsyncIterator["HealthCheckSpecifier"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_health_check( - self, - health_check_request: "HealthCheckRequest", - endpoint_health_response: "EndpointHealthResponse", - ) -> "HealthCheckSpecifier": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_health_check(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_health_check, - stream, - request_kwargs, - ) - - async def __rpc_fetch_health_check(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "health_check_request": request.health_check_request, - "endpoint_health_response": request.endpoint_health_response, - } - - response = await self.fetch_health_check(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.health.v3.HealthDiscoveryService/StreamHealthCheck": grpclib.const.Handler( - self.__rpc_stream_health_check, - grpclib.const.Cardinality.STREAM_STREAM, - HealthCheckRequestOrEndpointHealthResponse, - HealthCheckSpecifier, - ), - "/envoy.service.health.v3.HealthDiscoveryService/FetchHealthCheck": grpclib.const.Handler( - self.__rpc_fetch_health_check, - grpclib.const.Cardinality.UNARY_UNARY, - HealthCheckRequestOrEndpointHealthResponse, - HealthCheckSpecifier, - ), - } - - -from ....config.cluster import v3 as ___config_cluster_v3__ -from ....config.core import v3 as ___config_core_v3__ -from ....config.endpoint import v3 as ___config_endpoint_v3__ diff --git a/src/envoy_data_plane/envoy/service/listener/__init__.py b/src/envoy_data_plane/envoy/service/listener/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git 
a/src/envoy_data_plane/envoy/service/listener/v3/__init__.py b/src/envoy_data_plane/envoy/service/listener/v3/__init__.py deleted file mode 100644 index a222bb7..0000000 --- a/src/envoy_data_plane/envoy/service/listener/v3/__init__.py +++ /dev/null @@ -1,163 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/listener/v3/lds.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Optional, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class LdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 and - protoxform to upgrade the file. - """ - - pass - - -class ListenerDiscoveryServiceStub(betterproto.ServiceStub): - async def delta_listeners( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DeltaDiscoveryRequest"], - Iterable["__discovery_v3__.DeltaDiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.listener.v3.ListenerDiscoveryService/DeltaListeners", - request_iterator, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ): - yield response - - async def stream_listeners( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DiscoveryRequest"], - Iterable["__discovery_v3__.DiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.listener.v3.ListenerDiscoveryService/StreamListeners", - request_iterator, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ): - yield response - - async def fetch_listeners( - self, - *, - version_info: str = "", - node: "___config_core_v3__.Node" = None, - resource_names: Optional[List[str]] = None, - type_url: str = "", - response_nonce: str = "", - error_detail: "____google_rpc__.Status" = None - ) -> "__discovery_v3__.DiscoveryResponse": - resource_names = resource_names or [] - - request = __discovery_v3__.DiscoveryRequest() - request.version_info = version_info - if node is not None: - request.node = node - request.resource_names = resource_names - request.type_url = type_url - request.response_nonce = response_nonce - if error_detail is not None: - request.error_detail = error_detail - - return await self._unary_unary( - "/envoy.service.listener.v3.ListenerDiscoveryService/FetchListeners", - request, - __discovery_v3__.DiscoveryResponse, - ) - - -class ListenerDiscoveryServiceBase(ServiceBase): - async def delta_listeners( - self, request_iterator: AsyncIterator["__discovery_v3__.DeltaDiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def stream_listeners( - self, request_iterator: AsyncIterator["__discovery_v3__.DiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_listeners( - self, - version_info: str, - node: "___config_core_v3__.Node", - resource_names: Optional[List[str]], - type_url: str, - response_nonce: str, - error_detail: "____google_rpc__.Status", - ) -> "__discovery_v3__.DiscoveryResponse": - raise 
grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_delta_listeners(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_listeners, - stream, - request_kwargs, - ) - - async def __rpc_stream_listeners(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_listeners, - stream, - request_kwargs, - ) - - async def __rpc_fetch_listeners(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "version_info": request.version_info, - "node": request.node, - "resource_names": request.resource_names, - "type_url": request.type_url, - "response_nonce": request.response_nonce, - "error_detail": request.error_detail, - } - - response = await self.fetch_listeners(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.listener.v3.ListenerDiscoveryService/DeltaListeners": grpclib.const.Handler( - self.__rpc_delta_listeners, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ), - "/envoy.service.listener.v3.ListenerDiscoveryService/StreamListeners": grpclib.const.Handler( - self.__rpc_stream_listeners, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - "/envoy.service.listener.v3.ListenerDiscoveryService/FetchListeners": grpclib.const.Handler( - self.__rpc_fetch_listeners, - grpclib.const.Cardinality.UNARY_UNARY, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - } - - -from ...discovery import v3 as __discovery_v3__ diff --git a/src/envoy_data_plane/envoy/service/load_stats/__init__.py b/src/envoy_data_plane/envoy/service/load_stats/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/load_stats/v2/__init__.py b/src/envoy_data_plane/envoy/service/load_stats/v2/__init__.py deleted file mode 100644 index e56253b..0000000 --- a/src/envoy_data_plane/envoy/service/load_stats/v2/__init__.py +++ /dev/null @@ -1,102 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/load_stats/v2/lrs.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class LoadStatsRequest(betterproto.Message): - """ - A load report Envoy sends to the management server. [#not-implemented- - hide:] Not configuration. TBD how to doc proto APIs. - """ - - # Node identifier for Envoy instance. - node: "___api_v2_core__.Node" = betterproto.message_field(1) - # A list of load stats to report. - cluster_stats: List[ - "___api_v2_endpoint__.ClusterStats" - ] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class LoadStatsResponse(betterproto.Message): - """ - The management server sends envoy a LoadStatsResponse with all clusters it - is interested in learning load stats about. [#not-implemented-hide:] Not - configuration. TBD how to doc proto APIs. - """ - - # Clusters to report stats for. 
Not populated if *send_all_clusters* is true. - clusters: List[str] = betterproto.string_field(1) - # If true, the client should send all clusters it knows about. Only clients - # that advertise the "envoy.lrs.supports_send_all_clusters" capability in - # their :ref:`client_features` - # field will honor this field. - send_all_clusters: bool = betterproto.bool_field(4) - # The minimum interval of time to collect stats over. This is only a minimum - # for two reasons: 1. There may be some delay from when the timer fires until - # stats sampling occurs. 2. For clusters that were already feature in the - # previous *LoadStatsResponse*, any traffic that is observed in between - # the corresponding previous *LoadStatsRequest* and this - # *LoadStatsResponse* will also be accumulated and billed to the cluster. - # This avoids a period of inobservability that might otherwise exists - # between the messages. New clusters are not subject to this - # consideration. - load_reporting_interval: timedelta = betterproto.message_field(2) - # Set to *true* if the management server supports endpoint granularity - # report. - report_endpoint_granularity: bool = betterproto.bool_field(3) - - -class LoadReportingServiceStub(betterproto.ServiceStub): - async def stream_load_stats( - self, - request_iterator: Union[ - AsyncIterable["LoadStatsRequest"], Iterable["LoadStatsRequest"] - ], - ) -> AsyncIterator["LoadStatsResponse"]: - - async for response in self._stream_stream( - "/envoy.service.load_stats.v2.LoadReportingService/StreamLoadStats", - request_iterator, - LoadStatsRequest, - LoadStatsResponse, - ): - yield response - - -class LoadReportingServiceBase(ServiceBase): - async def stream_load_stats( - self, request_iterator: AsyncIterator["LoadStatsRequest"] - ) -> AsyncIterator["LoadStatsResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_load_stats(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_load_stats, - stream, - request_kwargs, - ) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.load_stats.v2.LoadReportingService/StreamLoadStats": grpclib.const.Handler( - self.__rpc_stream_load_stats, - grpclib.const.Cardinality.STREAM_STREAM, - LoadStatsRequest, - LoadStatsResponse, - ), - } - - -from ....api.v2 import core as ___api_v2_core__ -from ....api.v2 import endpoint as ___api_v2_endpoint__ diff --git a/src/envoy_data_plane/envoy/service/load_stats/v3/__init__.py b/src/envoy_data_plane/envoy/service/load_stats/v3/__init__.py deleted file mode 100644 index 2b457e5..0000000 --- a/src/envoy_data_plane/envoy/service/load_stats/v3/__init__.py +++ /dev/null @@ -1,98 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/load_stats/v3/lrs.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class LoadStatsRequest(betterproto.Message): - """A load report Envoy sends to the management server.""" - - # Node identifier for Envoy instance. - node: "___config_core_v3__.Node" = betterproto.message_field(1) - # A list of load stats to report. 
- cluster_stats: List[ - "___config_endpoint_v3__.ClusterStats" - ] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class LoadStatsResponse(betterproto.Message): - """ - The management server sends envoy a LoadStatsResponse with all clusters it - is interested in learning load stats about. - """ - - # Clusters to report stats for. Not populated if *send_all_clusters* is true. - clusters: List[str] = betterproto.string_field(1) - # If true, the client should send all clusters it knows about. Only clients - # that advertise the "envoy.lrs.supports_send_all_clusters" capability in - # their :ref:`client_features` field will honor this field. - send_all_clusters: bool = betterproto.bool_field(4) - # The minimum interval of time to collect stats over. This is only a minimum - # for two reasons: 1. There may be some delay from when the timer fires until - # stats sampling occurs. 2. For clusters that were already feature in the - # previous *LoadStatsResponse*, any traffic that is observed in between - # the corresponding previous *LoadStatsRequest* and this - # *LoadStatsResponse* will also be accumulated and billed to the cluster. - # This avoids a period of inobservability that might otherwise exists - # between the messages. New clusters are not subject to this - # consideration. - load_reporting_interval: timedelta = betterproto.message_field(2) - # Set to *true* if the management server supports endpoint granularity - # report. - report_endpoint_granularity: bool = betterproto.bool_field(3) - - -class LoadReportingServiceStub(betterproto.ServiceStub): - async def stream_load_stats( - self, - request_iterator: Union[ - AsyncIterable["LoadStatsRequest"], Iterable["LoadStatsRequest"] - ], - ) -> AsyncIterator["LoadStatsResponse"]: - - async for response in self._stream_stream( - "/envoy.service.load_stats.v3.LoadReportingService/StreamLoadStats", - request_iterator, - LoadStatsRequest, - LoadStatsResponse, - ): - yield response - - -class LoadReportingServiceBase(ServiceBase): - async def stream_load_stats( - self, request_iterator: AsyncIterator["LoadStatsRequest"] - ) -> AsyncIterator["LoadStatsResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_load_stats(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_load_stats, - stream, - request_kwargs, - ) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.load_stats.v3.LoadReportingService/StreamLoadStats": grpclib.const.Handler( - self.__rpc_stream_load_stats, - grpclib.const.Cardinality.STREAM_STREAM, - LoadStatsRequest, - LoadStatsResponse, - ), - } - - -from ....config.core import v3 as ___config_core_v3__ -from ....config.endpoint import v3 as ___config_endpoint_v3__ diff --git a/src/envoy_data_plane/envoy/service/metrics/__init__.py b/src/envoy_data_plane/envoy/service/metrics/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/metrics/v2/__init__.py b/src/envoy_data_plane/envoy/service/metrics/v2/__init__.py deleted file mode 100644 index 37821bd..0000000 --- a/src/envoy_data_plane/envoy/service/metrics/v2/__init__.py +++ /dev/null @@ -1,74 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
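# Illustrative sketch (editorial, hypothetical; not part of the generated
# sources removed by this patch): the deleted envoy.service.load_stats.v3
# module above exposes LoadReportingServiceBase. Assuming the regenerated
# 1.24.0 protobufs keep the same shape, a minimal LRS server could subclass
# it roughly as follows; the class name ExampleLrs and the 15-second
# reporting interval are invented for the example.
from datetime import timedelta
from typing import AsyncIterator

from envoy_data_plane.envoy.service.load_stats.v3 import (
    LoadReportingServiceBase,
    LoadStatsRequest,
    LoadStatsResponse,
)


class ExampleLrs(LoadReportingServiceBase):
    async def stream_load_stats(
        self, request_iterator: AsyncIterator[LoadStatsRequest]
    ) -> AsyncIterator[LoadStatsResponse]:
        # Acknowledge each incoming load report and ask the client to send
        # stats for every cluster it knows about, sampled over at least 15s.
        async for _report in request_iterator:
            yield LoadStatsResponse(
                send_all_clusters=True,
                load_reporting_interval=timedelta(seconds=15),
            )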
-# sources: envoy/service/metrics/v2/metrics_service.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class StreamMetricsResponse(betterproto.Message): - pass - - -@dataclass(eq=False, repr=False) -class StreamMetricsMessage(betterproto.Message): - # Identifier data effectively is a structured metadata. As a performance - # optimization this will only be sent in the first message on the stream. - identifier: "StreamMetricsMessageIdentifier" = betterproto.message_field(1) - # A list of metric entries - envoy_metrics: List[ - "____io_prometheus_client__.MetricFamily" - ] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class StreamMetricsMessageIdentifier(betterproto.Message): - # The node sending metrics over the stream. - node: "___api_v2_core__.Node" = betterproto.message_field(1) - - -class MetricsServiceStub(betterproto.ServiceStub): - async def stream_metrics( - self, - request_iterator: Union[ - AsyncIterable["StreamMetricsMessage"], Iterable["StreamMetricsMessage"] - ], - ) -> "StreamMetricsResponse": - - return await self._stream_unary( - "/envoy.service.metrics.v2.MetricsService/StreamMetrics", - request_iterator, - StreamMetricsMessage, - StreamMetricsResponse, - ) - - -class MetricsServiceBase(ServiceBase): - async def stream_metrics( - self, request_iterator: AsyncIterator["StreamMetricsMessage"] - ) -> "StreamMetricsResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_metrics(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - response = await self.stream_metrics(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.metrics.v2.MetricsService/StreamMetrics": grpclib.const.Handler( - self.__rpc_stream_metrics, - grpclib.const.Cardinality.STREAM_UNARY, - StreamMetricsMessage, - StreamMetricsResponse, - ), - } - - -from .....io.prometheus import client as ____io_prometheus_client__ -from ....api.v2 import core as ___api_v2_core__ diff --git a/src/envoy_data_plane/envoy/service/metrics/v3/__init__.py b/src/envoy_data_plane/envoy/service/metrics/v3/__init__.py deleted file mode 100644 index 54e8486..0000000 --- a/src/envoy_data_plane/envoy/service/metrics/v3/__init__.py +++ /dev/null @@ -1,74 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/metrics/v3/metrics_service.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class StreamMetricsResponse(betterproto.Message): - pass - - -@dataclass(eq=False, repr=False) -class StreamMetricsMessage(betterproto.Message): - # Identifier data effectively is a structured metadata. As a performance - # optimization this will only be sent in the first message on the stream. 
- identifier: "StreamMetricsMessageIdentifier" = betterproto.message_field(1) - # A list of metric entries - envoy_metrics: List[ - "____io_prometheus_client__.MetricFamily" - ] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class StreamMetricsMessageIdentifier(betterproto.Message): - # The node sending metrics over the stream. - node: "___config_core_v3__.Node" = betterproto.message_field(1) - - -class MetricsServiceStub(betterproto.ServiceStub): - async def stream_metrics( - self, - request_iterator: Union[ - AsyncIterable["StreamMetricsMessage"], Iterable["StreamMetricsMessage"] - ], - ) -> "StreamMetricsResponse": - - return await self._stream_unary( - "/envoy.service.metrics.v3.MetricsService/StreamMetrics", - request_iterator, - StreamMetricsMessage, - StreamMetricsResponse, - ) - - -class MetricsServiceBase(ServiceBase): - async def stream_metrics( - self, request_iterator: AsyncIterator["StreamMetricsMessage"] - ) -> "StreamMetricsResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_metrics(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - response = await self.stream_metrics(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.metrics.v3.MetricsService/StreamMetrics": grpclib.const.Handler( - self.__rpc_stream_metrics, - grpclib.const.Cardinality.STREAM_UNARY, - StreamMetricsMessage, - StreamMetricsResponse, - ), - } - - -from .....io.prometheus import client as ____io_prometheus_client__ -from ....config.core import v3 as ___config_core_v3__ diff --git a/src/envoy_data_plane/envoy/service/ratelimit/__init__.py b/src/envoy_data_plane/envoy/service/ratelimit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/ratelimit/v2/__init__.py b/src/envoy_data_plane/envoy/service/ratelimit/v2/__init__.py deleted file mode 100644 index d368481..0000000 --- a/src/envoy_data_plane/envoy/service/ratelimit/v2/__init__.py +++ /dev/null @@ -1,156 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/ratelimit/v2/rls.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Dict, List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -class RateLimitResponseCode(betterproto.Enum): - UNKNOWN = 0 - OK = 1 - OVER_LIMIT = 2 - - -class RateLimitResponseRateLimitUnit(betterproto.Enum): - UNKNOWN = 0 - SECOND = 1 - MINUTE = 2 - HOUR = 3 - DAY = 4 - - -@dataclass(eq=False, repr=False) -class RateLimitRequest(betterproto.Message): - """ - Main message for a rate limit request. The rate limit service is designed - to be fully generic in the sense that it can operate on arbitrary - hierarchical key/value pairs. The loaded configuration will parse the - request and find the most specific limit to apply. In addition, a - RateLimitRequest can contain multiple "descriptors" to limit on. When - multiple descriptors are provided, the server will limit on *ALL* of them - and return an OVER_LIMIT response if any of them are over limit. This - enables more complex application level rate limiting scenarios if desired. - """ - - # All rate limit requests must specify a domain. This enables the - # configuration to be per application without fear of overlap. E.g., "envoy". 
- domain: str = betterproto.string_field(1) - # All rate limit requests must specify at least one RateLimitDescriptor. Each - # descriptor is processed by the service (see below). If any of the - # descriptors are over limit, the entire request is considered to be over - # limit. - descriptors: List[ - "___api_v2_ratelimit__.RateLimitDescriptor" - ] = betterproto.message_field(2) - # Rate limit requests can optionally specify the number of hits a request - # adds to the matched limit. If the value is not set in the message, a - # request increases the matched limit by 1. - hits_addend: int = betterproto.uint32_field(3) - - -@dataclass(eq=False, repr=False) -class RateLimitResponse(betterproto.Message): - """A response from a ShouldRateLimit call.""" - - # The overall response code which takes into account all of the descriptors - # that were passed in the RateLimitRequest message. - overall_code: "RateLimitResponseCode" = betterproto.enum_field(1) - # A list of DescriptorStatus messages which matches the length of the - # descriptor list passed in the RateLimitRequest. This can be used by the - # caller to determine which individual descriptors failed and/or what the - # currently configured limits are for all of them. - statuses: List["RateLimitResponseDescriptorStatus"] = betterproto.message_field(2) - # A list of headers to add to the response - headers: List["___api_v2_core__.HeaderValue"] = betterproto.message_field(3) - # A list of headers to add to the request when forwarded - request_headers_to_add: List[ - "___api_v2_core__.HeaderValue" - ] = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class RateLimitResponseRateLimit(betterproto.Message): - """ - Defines an actual rate limit in terms of requests per unit of time and the - unit itself. - """ - - # A name or description of this limit. - name: str = betterproto.string_field(3) - # The number of requests per unit of time. - requests_per_unit: int = betterproto.uint32_field(1) - # The unit of time. - unit: "RateLimitResponseRateLimitUnit" = betterproto.enum_field(2) - - -@dataclass(eq=False, repr=False) -class RateLimitResponseDescriptorStatus(betterproto.Message): - # The response code for an individual descriptor. - code: "RateLimitResponseCode" = betterproto.enum_field(1) - # The current limit as configured by the server. Useful for debugging, etc. - current_limit: "RateLimitResponseRateLimit" = betterproto.message_field(2) - # The limit remaining in the current time unit. 
- limit_remaining: int = betterproto.uint32_field(3) - - -class RateLimitServiceStub(betterproto.ServiceStub): - async def should_rate_limit( - self, - *, - domain: str = "", - descriptors: Optional[List["___api_v2_ratelimit__.RateLimitDescriptor"]] = None, - hits_addend: int = 0 - ) -> "RateLimitResponse": - descriptors = descriptors or [] - - request = RateLimitRequest() - request.domain = domain - if descriptors is not None: - request.descriptors = descriptors - request.hits_addend = hits_addend - - return await self._unary_unary( - "/envoy.service.ratelimit.v2.RateLimitService/ShouldRateLimit", - request, - RateLimitResponse, - ) - - -class RateLimitServiceBase(ServiceBase): - async def should_rate_limit( - self, - domain: str, - descriptors: Optional[List["___api_v2_ratelimit__.RateLimitDescriptor"]], - hits_addend: int, - ) -> "RateLimitResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_should_rate_limit(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "domain": request.domain, - "descriptors": request.descriptors, - "hits_addend": request.hits_addend, - } - - response = await self.should_rate_limit(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.ratelimit.v2.RateLimitService/ShouldRateLimit": grpclib.const.Handler( - self.__rpc_should_rate_limit, - grpclib.const.Cardinality.UNARY_UNARY, - RateLimitRequest, - RateLimitResponse, - ), - } - - -from ....api.v2 import core as ___api_v2_core__ -from ....api.v2 import ratelimit as ___api_v2_ratelimit__ diff --git a/src/envoy_data_plane/envoy/service/ratelimit/v3/__init__.py b/src/envoy_data_plane/envoy/service/ratelimit/v3/__init__.py deleted file mode 100644 index a3c464c..0000000 --- a/src/envoy_data_plane/envoy/service/ratelimit/v3/__init__.py +++ /dev/null @@ -1,255 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/ratelimit/v3/rls.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import datetime, timedelta -from typing import Dict, List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -class RateLimitResponseCode(betterproto.Enum): - UNKNOWN = 0 - OK = 1 - OVER_LIMIT = 2 - - -class RateLimitResponseRateLimitUnit(betterproto.Enum): - UNKNOWN = 0 - SECOND = 1 - MINUTE = 2 - HOUR = 3 - DAY = 4 - - -@dataclass(eq=False, repr=False) -class RateLimitRequest(betterproto.Message): - """ - Main message for a rate limit request. The rate limit service is designed - to be fully generic in the sense that it can operate on arbitrary - hierarchical key/value pairs. The loaded configuration will parse the - request and find the most specific limit to apply. In addition, a - RateLimitRequest can contain multiple "descriptors" to limit on. When - multiple descriptors are provided, the server will limit on *ALL* of them - and return an OVER_LIMIT response if any of them are over limit. This - enables more complex application level rate limiting scenarios if desired. - """ - - # All rate limit requests must specify a domain. This enables the - # configuration to be per application without fear of overlap. E.g., "envoy". - domain: str = betterproto.string_field(1) - # All rate limit requests must specify at least one RateLimitDescriptor. Each - # descriptor is processed by the service (see below). 
If any of the - # descriptors are over limit, the entire request is considered to be over - # limit. - descriptors: List[ - "___extensions_common_ratelimit_v3__.RateLimitDescriptor" - ] = betterproto.message_field(2) - # Rate limit requests can optionally specify the number of hits a request - # adds to the matched limit. If the value is not set in the message, a - # request increases the matched limit by 1. - hits_addend: int = betterproto.uint32_field(3) - - -@dataclass(eq=False, repr=False) -class RateLimitResponse(betterproto.Message): - """A response from a ShouldRateLimit call. [#next-free-field: 8]""" - - # The overall response code which takes into account all of the descriptors - # that were passed in the RateLimitRequest message. - overall_code: "RateLimitResponseCode" = betterproto.enum_field(1) - # A list of DescriptorStatus messages which matches the length of the - # descriptor list passed in the RateLimitRequest. This can be used by the - # caller to determine which individual descriptors failed and/or what the - # currently configured limits are for all of them. - statuses: List["RateLimitResponseDescriptorStatus"] = betterproto.message_field(2) - # A list of headers to add to the response - response_headers_to_add: List[ - "___config_core_v3__.HeaderValue" - ] = betterproto.message_field(3) - # A list of headers to add to the request when forwarded - request_headers_to_add: List[ - "___config_core_v3__.HeaderValue" - ] = betterproto.message_field(4) - # A response body to send to the downstream client when the response code is - # not OK. - raw_body: bytes = betterproto.bytes_field(5) - # Optional response metadata that will be emitted as dynamic metadata to be - # consumed by the next filter. This metadata lives in a namespace specified - # by the canonical name of extension filter that requires it: - - # :ref:`envoy.filters.http.ratelimit - # ` for HTTP filter. - - # :ref:`envoy.filters.network.ratelimit - # ` for network filter. - - # :ref:`envoy.filters.thrift.rate_limit - # ` for Thrift filter. - dynamic_metadata: "betterproto_lib_google_protobuf.Struct" = ( - betterproto.message_field(6) - ) - # Quota is available for a request if its entire descriptor set has cached - # quota available. This is a union of all descriptors in the descriptor set. - # Clients can use the quota for future matches if and only if the descriptor - # set matches what was sent in the request that originated this response. If - # quota is available, a RLS request will not be made and the quota will be - # reduced by 1. If quota is not available (i.e., a cached entry doesn't exist - # for a RLS descriptor set), a RLS request will be triggered. If the server - # did not provide a quota, such as the quota message is empty then the - # request admission is determined by the :ref:`overall_code - # `. - # If there is not sufficient quota and the cached entry exists for a RLS - # descriptor set is out-of-quota but not expired, the request will be treated - # as OVER_LIMIT. [#not-implemented-hide:] - quota: "RateLimitResponseQuota" = betterproto.message_field(7) - - -@dataclass(eq=False, repr=False) -class RateLimitResponseRateLimit(betterproto.Message): - """ - Defines an actual rate limit in terms of requests per unit of time and the - unit itself. - """ - - # A name or description of this limit. - name: str = betterproto.string_field(3) - # The number of requests per unit of time. - requests_per_unit: int = betterproto.uint32_field(1) - # The unit of time. 
- unit: "RateLimitResponseRateLimitUnit" = betterproto.enum_field(2) - - -@dataclass(eq=False, repr=False) -class RateLimitResponseQuota(betterproto.Message): - """ - Cacheable quota for responses. Quota can be granted at different levels: - either for each individual descriptor or for the whole descriptor set. This - is a certain number of requests over a period of time. The client may cache - this result and apply the effective RateLimitResponse to future matching - requests without querying rate limit service. When quota expires due to - timeout, a new RLS request will also be made. The implementation may choose - to preemptively query the rate limit server for more quota on or before - expiration or before the available quota runs out. [#not-implemented-hide:] - """ - - # Number of matching requests granted in quota. Must be 1 or more. - requests: int = betterproto.uint32_field(1) - # Point in time at which the quota expires. - valid_until: datetime = betterproto.message_field(2, group="expiration_specifier") - # The unique id that is associated with each Quota either at individual - # descriptor level or whole descriptor set level. For a matching policy with - # boolean logic, for example, match: "request.headers['environment'] == - # 'staging' || request.headers['environment'] == 'dev'"), the request_headers - # action produces a distinct list of descriptors for each possible value of - # the ‘environment’ header even though the granted quota is same. Thus, the - # client will use this id information (returned from RLS server) to correctly - # correlate the multiple descriptors/descriptor sets that have been granted - # with same quota (i.e., share the same quota among multiple descriptors or - # descriptor sets.) If id is empty, this id field will be ignored. If quota - # for the same id changes (e.g. due to configuration update), the old quota - # will be overridden by the new one. Shared quotas referenced by ID will - # still adhere to expiration after `valid_until`. - id: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class RateLimitResponseDescriptorStatus(betterproto.Message): - """[#next-free-field: 6]""" - - # The response code for an individual descriptor. - code: "RateLimitResponseCode" = betterproto.enum_field(1) - # The current limit as configured by the server. Useful for debugging, etc. - current_limit: "RateLimitResponseRateLimit" = betterproto.message_field(2) - # The limit remaining in the current time unit. - limit_remaining: int = betterproto.uint32_field(3) - # Duration until reset of the current limit window. - duration_until_reset: timedelta = betterproto.message_field(4) - # Quota is available for a request if its descriptor set has cached quota - # available for all descriptors. This is for each individual descriptor in - # the descriptor set. The client will perform matches for each individual - # descriptor against available per-descriptor quota. If quota is available, a - # RLS request will not be made and the quota will be reduced by 1 for all - # matching descriptors. If there is not sufficient quota, there are three - # cases: 1. A cached entry exists for a RLS descriptor that is out-of-quota, - # but not expired. In this case, the request will be treated as - # OVER_LIMIT. 2. Some RLS descriptors have a cached entry that has valid - # quota but some RLS descriptors have no cached entry. This will trigger a - # new RLS request. 
When the result is returned, a single unit will be - # consumed from the quota for all matching descriptors. If the server - # did not provide a quota, such as the quota message is empty for some of - # the descriptors, then the request admission is determined by the - # :ref:`overall_code - # `. - # 3. All RLS descriptors lack a cached entry, this will trigger a new RLS - # request, When the result is returned, a single unit will be consumed - # from the quota for all matching descriptors. If the server did not - # provide a quota, such as the quota message is empty for some of the - # descriptors, then the request admission is determined by the - # :ref:`overall_code - # `. - # [#not-implemented-hide:] - quota: "RateLimitResponseQuota" = betterproto.message_field(5) - - -class RateLimitServiceStub(betterproto.ServiceStub): - async def should_rate_limit( - self, - *, - domain: str = "", - descriptors: Optional[ - List["___extensions_common_ratelimit_v3__.RateLimitDescriptor"] - ] = None, - hits_addend: int = 0 - ) -> "RateLimitResponse": - descriptors = descriptors or [] - - request = RateLimitRequest() - request.domain = domain - if descriptors is not None: - request.descriptors = descriptors - request.hits_addend = hits_addend - - return await self._unary_unary( - "/envoy.service.ratelimit.v3.RateLimitService/ShouldRateLimit", - request, - RateLimitResponse, - ) - - -class RateLimitServiceBase(ServiceBase): - async def should_rate_limit( - self, - domain: str, - descriptors: Optional[ - List["___extensions_common_ratelimit_v3__.RateLimitDescriptor"] - ], - hits_addend: int, - ) -> "RateLimitResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_should_rate_limit(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "domain": request.domain, - "descriptors": request.descriptors, - "hits_addend": request.hits_addend, - } - - response = await self.should_rate_limit(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.ratelimit.v3.RateLimitService/ShouldRateLimit": grpclib.const.Handler( - self.__rpc_should_rate_limit, - grpclib.const.Cardinality.UNARY_UNARY, - RateLimitRequest, - RateLimitResponse, - ), - } - - -from ....config.core import v3 as ___config_core_v3__ -from ....extensions.common.ratelimit import v3 as ___extensions_common_ratelimit_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/service/route/__init__.py b/src/envoy_data_plane/envoy/service/route/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/route/v3/__init__.py b/src/envoy_data_plane/envoy/service/route/v3/__init__.py deleted file mode 100644 index 1804e02..0000000 --- a/src/envoy_data_plane/envoy/service/route/v3/__init__.py +++ /dev/null @@ -1,358 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/route/v3/rds.proto, envoy/service/route/v3/srds.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Optional, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class SrdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 and - protoxform to upgrade the file. - """ - - pass - - -@dataclass(eq=False, repr=False) -class RdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 and - protoxform to upgrade the file. - """ - - pass - - -class ScopedRoutesDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_scoped_routes( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DiscoveryRequest"], - Iterable["__discovery_v3__.DiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.route.v3.ScopedRoutesDiscoveryService/StreamScopedRoutes", - request_iterator, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ): - yield response - - async def delta_scoped_routes( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DeltaDiscoveryRequest"], - Iterable["__discovery_v3__.DeltaDiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.route.v3.ScopedRoutesDiscoveryService/DeltaScopedRoutes", - request_iterator, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ): - yield response - - async def fetch_scoped_routes( - self, - *, - version_info: str = "", - node: "___config_core_v3__.Node" = None, - resource_names: Optional[List[str]] = None, - type_url: str = "", - response_nonce: str = "", - error_detail: "____google_rpc__.Status" = None - ) -> "__discovery_v3__.DiscoveryResponse": - resource_names = resource_names or [] - - request = __discovery_v3__.DiscoveryRequest() - request.version_info = version_info - if node is not None: - request.node = node - request.resource_names = resource_names - request.type_url = type_url - request.response_nonce = response_nonce - if error_detail is not None: - request.error_detail = error_detail - - return await self._unary_unary( - "/envoy.service.route.v3.ScopedRoutesDiscoveryService/FetchScopedRoutes", - request, - __discovery_v3__.DiscoveryResponse, - ) - - -class RouteDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_routes( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DiscoveryRequest"], - Iterable["__discovery_v3__.DiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.route.v3.RouteDiscoveryService/StreamRoutes", - request_iterator, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ): - yield response - - async def delta_routes( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DeltaDiscoveryRequest"], - Iterable["__discovery_v3__.DeltaDiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.route.v3.RouteDiscoveryService/DeltaRoutes", - request_iterator, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ): - yield response - - async def fetch_routes( - self, - *, - version_info: str = "", - node: "___config_core_v3__.Node" = None, - resource_names: Optional[List[str]] = None, - type_url: str = "", - response_nonce: str = "", - error_detail: 
"____google_rpc__.Status" = None - ) -> "__discovery_v3__.DiscoveryResponse": - resource_names = resource_names or [] - - request = __discovery_v3__.DiscoveryRequest() - request.version_info = version_info - if node is not None: - request.node = node - request.resource_names = resource_names - request.type_url = type_url - request.response_nonce = response_nonce - if error_detail is not None: - request.error_detail = error_detail - - return await self._unary_unary( - "/envoy.service.route.v3.RouteDiscoveryService/FetchRoutes", - request, - __discovery_v3__.DiscoveryResponse, - ) - - -class VirtualHostDiscoveryServiceStub(betterproto.ServiceStub): - async def delta_virtual_hosts( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DeltaDiscoveryRequest"], - Iterable["__discovery_v3__.DeltaDiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.route.v3.VirtualHostDiscoveryService/DeltaVirtualHosts", - request_iterator, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ): - yield response - - -class ScopedRoutesDiscoveryServiceBase(ServiceBase): - async def stream_scoped_routes( - self, request_iterator: AsyncIterator["__discovery_v3__.DiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def delta_scoped_routes( - self, request_iterator: AsyncIterator["__discovery_v3__.DeltaDiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_scoped_routes( - self, - version_info: str, - node: "___config_core_v3__.Node", - resource_names: Optional[List[str]], - type_url: str, - response_nonce: str, - error_detail: "____google_rpc__.Status", - ) -> "__discovery_v3__.DiscoveryResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_scoped_routes(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_scoped_routes, - stream, - request_kwargs, - ) - - async def __rpc_delta_scoped_routes(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_scoped_routes, - stream, - request_kwargs, - ) - - async def __rpc_fetch_scoped_routes(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "version_info": request.version_info, - "node": request.node, - "resource_names": request.resource_names, - "type_url": request.type_url, - "response_nonce": request.response_nonce, - "error_detail": request.error_detail, - } - - response = await self.fetch_scoped_routes(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.route.v3.ScopedRoutesDiscoveryService/StreamScopedRoutes": grpclib.const.Handler( - self.__rpc_stream_scoped_routes, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - "/envoy.service.route.v3.ScopedRoutesDiscoveryService/DeltaScopedRoutes": grpclib.const.Handler( - self.__rpc_delta_scoped_routes, - grpclib.const.Cardinality.STREAM_STREAM, - 
__discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ), - "/envoy.service.route.v3.ScopedRoutesDiscoveryService/FetchScopedRoutes": grpclib.const.Handler( - self.__rpc_fetch_scoped_routes, - grpclib.const.Cardinality.UNARY_UNARY, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - } - - -class RouteDiscoveryServiceBase(ServiceBase): - async def stream_routes( - self, request_iterator: AsyncIterator["__discovery_v3__.DiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def delta_routes( - self, request_iterator: AsyncIterator["__discovery_v3__.DeltaDiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_routes( - self, - version_info: str, - node: "___config_core_v3__.Node", - resource_names: Optional[List[str]], - type_url: str, - response_nonce: str, - error_detail: "____google_rpc__.Status", - ) -> "__discovery_v3__.DiscoveryResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_routes(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_routes, - stream, - request_kwargs, - ) - - async def __rpc_delta_routes(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_routes, - stream, - request_kwargs, - ) - - async def __rpc_fetch_routes(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "version_info": request.version_info, - "node": request.node, - "resource_names": request.resource_names, - "type_url": request.type_url, - "response_nonce": request.response_nonce, - "error_detail": request.error_detail, - } - - response = await self.fetch_routes(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.route.v3.RouteDiscoveryService/StreamRoutes": grpclib.const.Handler( - self.__rpc_stream_routes, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - "/envoy.service.route.v3.RouteDiscoveryService/DeltaRoutes": grpclib.const.Handler( - self.__rpc_delta_routes, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ), - "/envoy.service.route.v3.RouteDiscoveryService/FetchRoutes": grpclib.const.Handler( - self.__rpc_fetch_routes, - grpclib.const.Cardinality.UNARY_UNARY, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - } - - -class VirtualHostDiscoveryServiceBase(ServiceBase): - async def delta_virtual_hosts( - self, request_iterator: AsyncIterator["__discovery_v3__.DeltaDiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_delta_virtual_hosts(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_virtual_hosts, - stream, - request_kwargs, - ) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - 
"/envoy.service.route.v3.VirtualHostDiscoveryService/DeltaVirtualHosts": grpclib.const.Handler( - self.__rpc_delta_virtual_hosts, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ), - } - - -from ...discovery import v3 as __discovery_v3__ diff --git a/src/envoy_data_plane/envoy/service/runtime/__init__.py b/src/envoy_data_plane/envoy/service/runtime/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/runtime/v3/__init__.py b/src/envoy_data_plane/envoy/service/runtime/v3/__init__.py deleted file mode 100644 index 787a8b3..0000000 --- a/src/envoy_data_plane/envoy/service/runtime/v3/__init__.py +++ /dev/null @@ -1,176 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/runtime/v3/rtds.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Optional, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class RtdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 - """ - - pass - - -@dataclass(eq=False, repr=False) -class Runtime(betterproto.Message): - """ - RTDS resource type. This describes a layer in the runtime virtual - filesystem. - """ - - # Runtime resource name. This makes the Runtime a self-describing xDS - # resource. - name: str = betterproto.string_field(1) - layer: "betterproto_lib_google_protobuf.Struct" = betterproto.message_field(2) - - -class RuntimeDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_runtime( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DiscoveryRequest"], - Iterable["__discovery_v3__.DiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.runtime.v3.RuntimeDiscoveryService/StreamRuntime", - request_iterator, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ): - yield response - - async def delta_runtime( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DeltaDiscoveryRequest"], - Iterable["__discovery_v3__.DeltaDiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.runtime.v3.RuntimeDiscoveryService/DeltaRuntime", - request_iterator, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ): - yield response - - async def fetch_runtime( - self, - *, - version_info: str = "", - node: "___config_core_v3__.Node" = None, - resource_names: Optional[List[str]] = None, - type_url: str = "", - response_nonce: str = "", - error_detail: "____google_rpc__.Status" = None - ) -> "__discovery_v3__.DiscoveryResponse": - resource_names = resource_names or [] - - request = __discovery_v3__.DiscoveryRequest() - request.version_info = version_info - if node is not None: - request.node = node - request.resource_names = resource_names - request.type_url = type_url - request.response_nonce = response_nonce - if error_detail is not None: - request.error_detail = error_detail - - return await self._unary_unary( - "/envoy.service.runtime.v3.RuntimeDiscoveryService/FetchRuntime", - request, - 
__discovery_v3__.DiscoveryResponse, - ) - - -class RuntimeDiscoveryServiceBase(ServiceBase): - async def stream_runtime( - self, request_iterator: AsyncIterator["__discovery_v3__.DiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def delta_runtime( - self, request_iterator: AsyncIterator["__discovery_v3__.DeltaDiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_runtime( - self, - version_info: str, - node: "___config_core_v3__.Node", - resource_names: Optional[List[str]], - type_url: str, - response_nonce: str, - error_detail: "____google_rpc__.Status", - ) -> "__discovery_v3__.DiscoveryResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_runtime(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_runtime, - stream, - request_kwargs, - ) - - async def __rpc_delta_runtime(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_runtime, - stream, - request_kwargs, - ) - - async def __rpc_fetch_runtime(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "version_info": request.version_info, - "node": request.node, - "resource_names": request.resource_names, - "type_url": request.type_url, - "response_nonce": request.response_nonce, - "error_detail": request.error_detail, - } - - response = await self.fetch_runtime(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.runtime.v3.RuntimeDiscoveryService/StreamRuntime": grpclib.const.Handler( - self.__rpc_stream_runtime, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - "/envoy.service.runtime.v3.RuntimeDiscoveryService/DeltaRuntime": grpclib.const.Handler( - self.__rpc_delta_runtime, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ), - "/envoy.service.runtime.v3.RuntimeDiscoveryService/FetchRuntime": grpclib.const.Handler( - self.__rpc_fetch_runtime, - grpclib.const.Cardinality.UNARY_UNARY, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - } - - -from ...discovery import v3 as __discovery_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/service/secret/__init__.py b/src/envoy_data_plane/envoy/service/secret/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/secret/v3/__init__.py b/src/envoy_data_plane/envoy/service/secret/v3/__init__.py deleted file mode 100644 index 45d2ba8..0000000 --- a/src/envoy_data_plane/envoy/service/secret/v3/__init__.py +++ /dev/null @@ -1,162 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
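# Illustrative sketch (editorial, hypothetical; not part of the generated
# sources removed by this patch): the deleted envoy.service.runtime.v3 module
# above exposes RuntimeDiscoveryServiceStub. Assuming the regenerated
# protobufs keep the same shape, a one-shot RTDS fetch over a grpclib channel
# might look like this; the host, port, layer name, and type URL value are
# assumptions for the example.
import asyncio

from grpclib.client import Channel

from envoy_data_plane.envoy.service.runtime.v3 import RuntimeDiscoveryServiceStub


async def fetch_runtime_layers() -> None:
    channel = Channel(host="127.0.0.1", port=18000)
    try:
        stub = RuntimeDiscoveryServiceStub(channel)
        # Request a single (hypothetical) runtime layer by name.
        response = await stub.fetch_runtime(
            resource_names=["example_rtds_layer"],
            type_url="type.googleapis.com/envoy.service.runtime.v3.Runtime",
        )
        print(response.version_info, len(response.resources))
    finally:
        channel.close()


if __name__ == "__main__":
    asyncio.run(fetch_runtime_layers())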
-# sources: envoy/service/secret/v3/sds.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Optional, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class SdsDummy(betterproto.Message): - """ - [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue - with importing services: https://github.com/google/protobuf/issues/4221 - """ - - pass - - -class SecretDiscoveryServiceStub(betterproto.ServiceStub): - async def delta_secrets( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DeltaDiscoveryRequest"], - Iterable["__discovery_v3__.DeltaDiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.secret.v3.SecretDiscoveryService/DeltaSecrets", - request_iterator, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ): - yield response - - async def stream_secrets( - self, - request_iterator: Union[ - AsyncIterable["__discovery_v3__.DiscoveryRequest"], - Iterable["__discovery_v3__.DiscoveryRequest"], - ], - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - - async for response in self._stream_stream( - "/envoy.service.secret.v3.SecretDiscoveryService/StreamSecrets", - request_iterator, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ): - yield response - - async def fetch_secrets( - self, - *, - version_info: str = "", - node: "___config_core_v3__.Node" = None, - resource_names: Optional[List[str]] = None, - type_url: str = "", - response_nonce: str = "", - error_detail: "____google_rpc__.Status" = None - ) -> "__discovery_v3__.DiscoveryResponse": - resource_names = resource_names or [] - - request = __discovery_v3__.DiscoveryRequest() - request.version_info = version_info - if node is not None: - request.node = node - request.resource_names = resource_names - request.type_url = type_url - request.response_nonce = response_nonce - if error_detail is not None: - request.error_detail = error_detail - - return await self._unary_unary( - "/envoy.service.secret.v3.SecretDiscoveryService/FetchSecrets", - request, - __discovery_v3__.DiscoveryResponse, - ) - - -class SecretDiscoveryServiceBase(ServiceBase): - async def delta_secrets( - self, request_iterator: AsyncIterator["__discovery_v3__.DeltaDiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DeltaDiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def stream_secrets( - self, request_iterator: AsyncIterator["__discovery_v3__.DiscoveryRequest"] - ) -> AsyncIterator["__discovery_v3__.DiscoveryResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_secrets( - self, - version_info: str, - node: "___config_core_v3__.Node", - resource_names: Optional[List[str]], - type_url: str, - response_nonce: str, - error_detail: "____google_rpc__.Status", - ) -> "__discovery_v3__.DiscoveryResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_delta_secrets(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.delta_secrets, - stream, - request_kwargs, - ) - - async def __rpc_stream_secrets(self, stream: grpclib.server.Stream) -> None: - request_kwargs = 
{"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_secrets, - stream, - request_kwargs, - ) - - async def __rpc_fetch_secrets(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "version_info": request.version_info, - "node": request.node, - "resource_names": request.resource_names, - "type_url": request.type_url, - "response_nonce": request.response_nonce, - "error_detail": request.error_detail, - } - - response = await self.fetch_secrets(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.secret.v3.SecretDiscoveryService/DeltaSecrets": grpclib.const.Handler( - self.__rpc_delta_secrets, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DeltaDiscoveryRequest, - __discovery_v3__.DeltaDiscoveryResponse, - ), - "/envoy.service.secret.v3.SecretDiscoveryService/StreamSecrets": grpclib.const.Handler( - self.__rpc_stream_secrets, - grpclib.const.Cardinality.STREAM_STREAM, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - "/envoy.service.secret.v3.SecretDiscoveryService/FetchSecrets": grpclib.const.Handler( - self.__rpc_fetch_secrets, - grpclib.const.Cardinality.UNARY_UNARY, - __discovery_v3__.DiscoveryRequest, - __discovery_v3__.DiscoveryResponse, - ), - } - - -from ...discovery import v3 as __discovery_v3__ diff --git a/src/envoy_data_plane/envoy/service/status/__init__.py b/src/envoy_data_plane/envoy/service/status/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/status/v2/__init__.py b/src/envoy_data_plane/envoy/service/status/v2/__init__.py deleted file mode 100644 index ea9292a..0000000 --- a/src/envoy_data_plane/envoy/service/status/v2/__init__.py +++ /dev/null @@ -1,154 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/status/v2/csds.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Optional, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -class ConfigStatus(betterproto.Enum): - """Status of a config.""" - - # Status info is not available/unknown. - UNKNOWN = 0 - # Management server has sent the config to client and received ACK. - SYNCED = 1 - # Config is not sent. - NOT_SENT = 2 - # Management server has sent the config to client but hasn’t received - # ACK/NACK. - STALE = 3 - # Management server has sent the config to client but received NACK. - ERROR = 4 - - -@dataclass(eq=False, repr=False) -class ClientStatusRequest(betterproto.Message): - """ - Request for client status of clients identified by a list of NodeMatchers. - """ - - # Management server can use these match criteria to identify clients. The - # match follows OR semantics. - node_matchers: List["___type_matcher__.NodeMatcher"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class PerXdsConfig(betterproto.Message): - """Detailed config (per xDS) with status. 
[#next-free-field: 6]""" - - status: "ConfigStatus" = betterproto.enum_field(1) - listener_config: "___admin_v2_alpha__.ListenersConfigDump" = ( - betterproto.message_field(2, group="per_xds_config") - ) - cluster_config: "___admin_v2_alpha__.ClustersConfigDump" = ( - betterproto.message_field(3, group="per_xds_config") - ) - route_config: "___admin_v2_alpha__.RoutesConfigDump" = betterproto.message_field( - 4, group="per_xds_config" - ) - scoped_route_config: "___admin_v2_alpha__.ScopedRoutesConfigDump" = ( - betterproto.message_field(5, group="per_xds_config") - ) - - -@dataclass(eq=False, repr=False) -class ClientConfig(betterproto.Message): - """All xds configs for a particular client.""" - - # Node for a particular client. - node: "___api_v2_core__.Node" = betterproto.message_field(1) - xds_config: List["PerXdsConfig"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ClientStatusResponse(betterproto.Message): - # Client configs for the clients specified in the ClientStatusRequest. - config: List["ClientConfig"] = betterproto.message_field(1) - - -class ClientStatusDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_client_status( - self, - request_iterator: Union[ - AsyncIterable["ClientStatusRequest"], Iterable["ClientStatusRequest"] - ], - ) -> AsyncIterator["ClientStatusResponse"]: - - async for response in self._stream_stream( - "/envoy.service.status.v2.ClientStatusDiscoveryService/StreamClientStatus", - request_iterator, - ClientStatusRequest, - ClientStatusResponse, - ): - yield response - - async def fetch_client_status( - self, *, node_matchers: Optional[List["___type_matcher__.NodeMatcher"]] = None - ) -> "ClientStatusResponse": - node_matchers = node_matchers or [] - - request = ClientStatusRequest() - if node_matchers is not None: - request.node_matchers = node_matchers - - return await self._unary_unary( - "/envoy.service.status.v2.ClientStatusDiscoveryService/FetchClientStatus", - request, - ClientStatusResponse, - ) - - -class ClientStatusDiscoveryServiceBase(ServiceBase): - async def stream_client_status( - self, request_iterator: AsyncIterator["ClientStatusRequest"] - ) -> AsyncIterator["ClientStatusResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_client_status( - self, node_matchers: Optional[List["___type_matcher__.NodeMatcher"]] - ) -> "ClientStatusResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_client_status(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_client_status, - stream, - request_kwargs, - ) - - async def __rpc_fetch_client_status(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "node_matchers": request.node_matchers, - } - - response = await self.fetch_client_status(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.status.v2.ClientStatusDiscoveryService/StreamClientStatus": grpclib.const.Handler( - self.__rpc_stream_client_status, - grpclib.const.Cardinality.STREAM_STREAM, - ClientStatusRequest, - ClientStatusResponse, - ), - "/envoy.service.status.v2.ClientStatusDiscoveryService/FetchClientStatus": grpclib.const.Handler( - self.__rpc_fetch_client_status, - grpclib.const.Cardinality.UNARY_UNARY, - ClientStatusRequest, - ClientStatusResponse, - 
), - } - - -from ....admin import v2alpha as ___admin_v2_alpha__ -from ....api.v2 import core as ___api_v2_core__ -from ....type import matcher as ___type_matcher__ diff --git a/src/envoy_data_plane/envoy/service/status/v3/__init__.py b/src/envoy_data_plane/envoy/service/status/v3/__init__.py deleted file mode 100644 index 570ee73..0000000 --- a/src/envoy_data_plane/envoy/service/status/v3/__init__.py +++ /dev/null @@ -1,256 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/status/v3/csds.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import datetime -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Optional, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -class ConfigStatus(betterproto.Enum): - """Status of a config from a management server view.""" - - # Status info is not available/unknown. - UNKNOWN = 0 - # Management server has sent the config to client and received ACK. - SYNCED = 1 - # Config is not sent. - NOT_SENT = 2 - # Management server has sent the config to client but hasn’t received - # ACK/NACK. - STALE = 3 - # Management server has sent the config to client but received NACK. The - # attached config dump will be the latest config (the rejected one), since it - # is the persisted version in the management server. - ERROR = 4 - - -class ClientConfigStatus(betterproto.Enum): - """Config status from a client-side view.""" - - # Config status is not available/unknown. - CLIENT_UNKNOWN = 0 - # Client requested the config but hasn't received any config from management - # server yet. - CLIENT_REQUESTED = 1 - # Client received the config and replied with ACK. - CLIENT_ACKED = 2 - # Client received the config and replied with NACK. Notably, the attached - # config dump is not the NACKed version, but the most recent accepted one. If - # no config is accepted yet, the attached config dump will be empty. - CLIENT_NACKED = 3 - - -@dataclass(eq=False, repr=False) -class ClientStatusRequest(betterproto.Message): - """ - Request for client status of clients identified by a list of NodeMatchers. - """ - - # Management server can use these match criteria to identify clients. The - # match follows OR semantics. - node_matchers: List["___type_matcher_v3__.NodeMatcher"] = betterproto.message_field( - 1 - ) - # The node making the csds request. - node: "___config_core_v3__.Node" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class PerXdsConfig(betterproto.Message): - """Detailed config (per xDS) with status. [#next-free-field: 8]""" - - # Config status generated by management servers. Will not be present if the - # CSDS server is an xDS client. - status: "ConfigStatus" = betterproto.enum_field(1) - # Client config status is populated by xDS clients. Will not be present if - # the CSDS server is an xDS server. No matter what the client config status - # is, xDS clients should always dump the most recent accepted xDS config. .. - # attention:: This field is deprecated. Use :ref:`ClientResourceStatus - # ` for per-resource - # config status instead. 
- client_status: "ClientConfigStatus" = betterproto.enum_field(7) - listener_config: "___admin_v3__.ListenersConfigDump" = betterproto.message_field( - 2, group="per_xds_config" - ) - cluster_config: "___admin_v3__.ClustersConfigDump" = betterproto.message_field( - 3, group="per_xds_config" - ) - route_config: "___admin_v3__.RoutesConfigDump" = betterproto.message_field( - 4, group="per_xds_config" - ) - scoped_route_config: "___admin_v3__.ScopedRoutesConfigDump" = ( - betterproto.message_field(5, group="per_xds_config") - ) - endpoint_config: "___admin_v3__.EndpointsConfigDump" = betterproto.message_field( - 6, group="per_xds_config" - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.client_status: - warnings.warn( - "PerXdsConfig.client_status is deprecated", DeprecationWarning - ) - - -@dataclass(eq=False, repr=False) -class ClientConfig(betterproto.Message): - """All xds configs for a particular client.""" - - # Node for a particular client. - node: "___config_core_v3__.Node" = betterproto.message_field(1) - # This field is deprecated in favor of generic_xds_configs which is much - # simpler and uniform in structure. - xds_config: List["PerXdsConfig"] = betterproto.message_field(2) - # Represents generic xDS config and the exact config structure depends on the - # type URL (like Cluster if it is CDS) - generic_xds_configs: List[ - "ClientConfigGenericXdsConfig" - ] = betterproto.message_field(3) - - def __post_init__(self) -> None: - super().__post_init__() - if self.xds_config: - warnings.warn("ClientConfig.xds_config is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class ClientConfigGenericXdsConfig(betterproto.Message): - """ - GenericXdsConfig is used to specify the config status and the dump of any - xDS resource identified by their type URL. It is the generalized version of - the now deprecated ListenersConfigDump, ClustersConfigDump etc [#next-free- - field: 10] - """ - - # Type_url represents the fully qualified name of xDS resource type like - # envoy.v3.Cluster, envoy.v3.ClusterLoadAssignment etc. - type_url: str = betterproto.string_field(1) - # Name of the xDS resource - name: str = betterproto.string_field(2) - # This is the :ref:`version_info - # ` - # in the last processed xDS discovery response. If there are only static - # bootstrap listeners, this field will be "" - version_info: str = betterproto.string_field(3) - # The xDS resource config. Actual content depends on the type - xds_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(4) - # Timestamp when the xDS resource was last updated - last_updated: datetime = betterproto.message_field(5) - # Per xDS resource config status. It is generated by management servers. It - # will not be present if the CSDS server is an xDS client. - config_status: "ConfigStatus" = betterproto.enum_field(6) - # Per xDS resource status from the view of a xDS client - client_status: "___admin_v3__.ClientResourceStatus" = betterproto.enum_field(7) - # Set if the last update failed, cleared after the next successful update. - # The *error_state* field contains the rejected version of this particular - # resource along with the reason and timestamp. For successfully updated or - # acknowledged resource, this field should be empty. [#not-implemented-hide:] - error_state: "___admin_v3__.UpdateFailureState" = betterproto.message_field(8) - # Is static resource is true if it is specified in the config supplied - # through the file at the startup. 
- is_static_resource: bool = betterproto.bool_field(9) - - -@dataclass(eq=False, repr=False) -class ClientStatusResponse(betterproto.Message): - # Client configs for the clients specified in the ClientStatusRequest. - config: List["ClientConfig"] = betterproto.message_field(1) - - -class ClientStatusDiscoveryServiceStub(betterproto.ServiceStub): - async def stream_client_status( - self, - request_iterator: Union[ - AsyncIterable["ClientStatusRequest"], Iterable["ClientStatusRequest"] - ], - ) -> AsyncIterator["ClientStatusResponse"]: - - async for response in self._stream_stream( - "/envoy.service.status.v3.ClientStatusDiscoveryService/StreamClientStatus", - request_iterator, - ClientStatusRequest, - ClientStatusResponse, - ): - yield response - - async def fetch_client_status( - self, - *, - node_matchers: Optional[List["___type_matcher_v3__.NodeMatcher"]] = None, - node: "___config_core_v3__.Node" = None - ) -> "ClientStatusResponse": - node_matchers = node_matchers or [] - - request = ClientStatusRequest() - if node_matchers is not None: - request.node_matchers = node_matchers - if node is not None: - request.node = node - - return await self._unary_unary( - "/envoy.service.status.v3.ClientStatusDiscoveryService/FetchClientStatus", - request, - ClientStatusResponse, - ) - - -class ClientStatusDiscoveryServiceBase(ServiceBase): - async def stream_client_status( - self, request_iterator: AsyncIterator["ClientStatusRequest"] - ) -> AsyncIterator["ClientStatusResponse"]: - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def fetch_client_status( - self, - node_matchers: Optional[List["___type_matcher_v3__.NodeMatcher"]], - node: "___config_core_v3__.Node", - ) -> "ClientStatusResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_client_status(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - await self._call_rpc_handler_server_stream( - self.stream_client_status, - stream, - request_kwargs, - ) - - async def __rpc_fetch_client_status(self, stream: grpclib.server.Stream) -> None: - request = await stream.recv_message() - - request_kwargs = { - "node_matchers": request.node_matchers, - "node": request.node, - } - - response = await self.fetch_client_status(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.status.v3.ClientStatusDiscoveryService/StreamClientStatus": grpclib.const.Handler( - self.__rpc_stream_client_status, - grpclib.const.Cardinality.STREAM_STREAM, - ClientStatusRequest, - ClientStatusResponse, - ), - "/envoy.service.status.v3.ClientStatusDiscoveryService/FetchClientStatus": grpclib.const.Handler( - self.__rpc_fetch_client_status, - grpclib.const.Cardinality.UNARY_UNARY, - ClientStatusRequest, - ClientStatusResponse, - ), - } - - -from ....admin import v3 as ___admin_v3__ -from ....config.core import v3 as ___config_core_v3__ -from ....type.matcher import v3 as ___type_matcher_v3__ -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/envoy/service/tap/__init__.py b/src/envoy_data_plane/envoy/service/tap/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/tap/v2alpha/__init__.py b/src/envoy_data_plane/envoy/service/tap/v2alpha/__init__.py deleted file mode 100644 index f9aa663..0000000 --- 
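A small client-side sketch of the v3 CSDS stub above: with no node_matchers, FetchClientStatus asks for every known client, and the per-resource generic_xds_configs entries carry the type URL, name, and config status fields defined in the deleted dataclasses. The address and port are placeholders.

import asyncio

from grpclib.client import Channel

from envoy_data_plane.envoy.service.status.v3 import ClientStatusDiscoveryServiceStub


async def main() -> None:
    channel = Channel(host="127.0.0.1", port=18000)  # placeholder CSDS endpoint
    try:
        stub = ClientStatusDiscoveryServiceStub(channel)
        response = await stub.fetch_client_status()
        for client in response.config:
            for cfg in client.generic_xds_configs:
                print(cfg.type_url, cfg.name, cfg.config_status)
    finally:
        channel.close()


asyncio.run(main())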
a/src/envoy_data_plane/envoy/service/tap/v2alpha/__init__.py +++ /dev/null @@ -1,257 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/tap/v2alpha/common.proto, envoy/service/tap/v2alpha/tap.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Optional, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -class OutputSinkFormat(betterproto.Enum): - JSON_BODY_AS_BYTES = 0 - JSON_BODY_AS_STRING = 1 - PROTO_BINARY = 2 - PROTO_BINARY_LENGTH_DELIMITED = 3 - PROTO_TEXT = 4 - - -@dataclass(eq=False, repr=False) -class TapConfig(betterproto.Message): - """Tap configuration.""" - - # The match configuration. If the configuration matches the data source being - # tapped, a tap will occur, with the result written to the configured output. - match_config: "MatchPredicate" = betterproto.message_field(1) - # The tap output configuration. If a match configuration matches a data - # source being tapped, a tap will occur and the data will be written to the - # configured output. - output_config: "OutputConfig" = betterproto.message_field(2) - # [#not-implemented-hide:] Specify if Tap matching is enabled. The % of - # requests\connections for which the tap matching is enabled. When not - # enabled, the request\connection will not be recorded. .. note:: This - # field defaults to 100/:ref:`HUNDRED - # `. - tap_enabled: "___api_v2_core__.RuntimeFractionalPercent" = ( - betterproto.message_field(3) - ) - - -@dataclass(eq=False, repr=False) -class MatchPredicate(betterproto.Message): - """ - Tap match configuration. This is a recursive structure which allows complex - nested match configurations to be built using various logical operators. - [#next-free-field: 9] - """ - - # A set that describes a logical OR. If any member of the set matches, the - # match configuration matches. - or_match: "MatchPredicateMatchSet" = betterproto.message_field(1, group="rule") - # A set that describes a logical AND. If all members of the set match, the - # match configuration matches. - and_match: "MatchPredicateMatchSet" = betterproto.message_field(2, group="rule") - # A negation match. The match configuration will match if the negated match - # condition matches. - not_match: "MatchPredicate" = betterproto.message_field(3, group="rule") - # The match configuration will always match. - any_match: bool = betterproto.bool_field(4, group="rule") - # HTTP request headers match configuration. - http_request_headers_match: "HttpHeadersMatch" = betterproto.message_field( - 5, group="rule" - ) - # HTTP request trailers match configuration. - http_request_trailers_match: "HttpHeadersMatch" = betterproto.message_field( - 6, group="rule" - ) - # HTTP response headers match configuration. - http_response_headers_match: "HttpHeadersMatch" = betterproto.message_field( - 7, group="rule" - ) - # HTTP response trailers match configuration. - http_response_trailers_match: "HttpHeadersMatch" = betterproto.message_field( - 8, group="rule" - ) - - -@dataclass(eq=False, repr=False) -class MatchPredicateMatchSet(betterproto.Message): - """A set of match configurations used for logical operations.""" - - # The list of rules that make up the set. - rules: List["MatchPredicate"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class HttpHeadersMatch(betterproto.Message): - """HTTP headers match configuration.""" - - # HTTP headers to match. 
- headers: List["___api_v2_route__.HeaderMatcher"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class OutputConfig(betterproto.Message): - """Tap output configuration.""" - - # Output sinks for tap data. Currently a single sink is allowed in the list. - # Once multiple sink types are supported this constraint will be relaxed. - sinks: List["OutputSink"] = betterproto.message_field(1) - # For buffered tapping, the maximum amount of received body that will be - # buffered prior to truncation. If truncation occurs, the :ref:`truncated - # ` field will be set. If - # not specified, the default is 1KiB. - max_buffered_rx_bytes: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # For buffered tapping, the maximum amount of transmitted body that will be - # buffered prior to truncation. If truncation occurs, the :ref:`truncated - # ` field will be set. If - # not specified, the default is 1KiB. - max_buffered_tx_bytes: Optional[int] = betterproto.message_field( - 3, wraps=betterproto.TYPE_UINT32 - ) - # Indicates whether taps produce a single buffered message per tap, or - # multiple streamed messages per tap in the emitted :ref:`TraceWrapper - # ` messages. Note that streamed - # tapping does not mean that no buffering takes place. Buffering may be - # required if data is processed before a match can be determined. See the - # HTTP tap filter :ref:`streaming ` - # documentation for more information. - streaming: bool = betterproto.bool_field(4) - - -@dataclass(eq=False, repr=False) -class OutputSink(betterproto.Message): - """Tap output sink configuration.""" - - # Sink output format. - format: "OutputSinkFormat" = betterproto.enum_field(1) - # Tap output will be streamed out the :http:post:`/tap` admin endpoint. .. - # attention:: It is only allowed to specify the streaming admin output sink - # if the tap is being configured from the :http:post:`/tap` admin endpoint. - # Thus, if an extension has been configured to receive tap configuration - # from some other source (e.g., static file, XDS, etc.) configuring the - # streaming admin output type will fail. - streaming_admin: "StreamingAdminSink" = betterproto.message_field( - 2, group="output_sink_type" - ) - # Tap output will be written to a file per tap sink. - file_per_tap: "FilePerTapSink" = betterproto.message_field( - 3, group="output_sink_type" - ) - # [#not-implemented-hide:] GrpcService to stream data to. The format argument - # must be PROTO_BINARY. - streaming_grpc: "StreamingGrpcSink" = betterproto.message_field( - 4, group="output_sink_type" - ) - - -@dataclass(eq=False, repr=False) -class StreamingAdminSink(betterproto.Message): - """Streaming admin sink configuration.""" - - pass - - -@dataclass(eq=False, repr=False) -class FilePerTapSink(betterproto.Message): - """ - The file per tap sink outputs a discrete file for every tapped stream. - """ - - # Path prefix. The output file will be of the form _.pb, - # where is an identifier distinguishing the recorded trace for stream - # instances (the Envoy connection ID, HTTP stream ID, etc.). - path_prefix: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class StreamingGrpcSink(betterproto.Message): - """ - [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps - to an external gRPC server. - """ - - # Opaque identifier, that will be sent back to the streaming grpc server. - tap_id: str = betterproto.string_field(1) - # The gRPC server that hosts the Tap Sink Service. 
- grpc_service: "___api_v2_core__.GrpcService" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class StreamTapsRequest(betterproto.Message): - """ - [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a - stream to the server and stream taps without ever expecting a response. - """ - - # Identifier data effectively is a structured metadata. As a performance - # optimization this will only be sent in the first message on the stream. - identifier: "StreamTapsRequestIdentifier" = betterproto.message_field(1) - # The trace id. this can be used to merge together a streaming trace. Note - # that the trace_id is not guaranteed to be spatially or temporally unique. - trace_id: int = betterproto.uint64_field(2) - # The trace data. - trace: "___data_tap_v2_alpha__.TraceWrapper" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class StreamTapsRequestIdentifier(betterproto.Message): - # The node sending taps over the stream. - node: "___api_v2_core__.Node" = betterproto.message_field(1) - # The opaque identifier that was set in the :ref:`output config - # `. - tap_id: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class StreamTapsResponse(betterproto.Message): - """[#not-implemented-hide:]""" - - pass - - -class TapSinkServiceStub(betterproto.ServiceStub): - async def stream_taps( - self, - request_iterator: Union[ - AsyncIterable["StreamTapsRequest"], Iterable["StreamTapsRequest"] - ], - ) -> "StreamTapsResponse": - - return await self._stream_unary( - "/envoy.service.tap.v2alpha.TapSinkService/StreamTaps", - request_iterator, - StreamTapsRequest, - StreamTapsResponse, - ) - - -class TapSinkServiceBase(ServiceBase): - async def stream_taps( - self, request_iterator: AsyncIterator["StreamTapsRequest"] - ) -> "StreamTapsResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_taps(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - response = await self.stream_taps(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.tap.v2alpha.TapSinkService/StreamTaps": grpclib.const.Handler( - self.__rpc_stream_taps, - grpclib.const.Cardinality.STREAM_UNARY, - StreamTapsRequest, - StreamTapsResponse, - ), - } - - -from ....api.v2 import core as ___api_v2_core__ -from ....api.v2 import route as ___api_v2_route__ -from ....data.tap import v2alpha as ___data_tap_v2_alpha__ diff --git a/src/envoy_data_plane/envoy/service/tap/v3/__init__.py b/src/envoy_data_plane/envoy/service/tap/v3/__init__.py deleted file mode 100644 index a4f6139..0000000 --- a/src/envoy_data_plane/envoy/service/tap/v3/__init__.py +++ /dev/null @@ -1,85 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/tap/v3/tap.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class StreamTapsRequest(betterproto.Message): - """ - [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a - stream to the server and stream taps without ever expecting a response. - """ - - # Identifier data effectively is a structured metadata. 
As a performance - # optimization this will only be sent in the first message on the stream. - identifier: "StreamTapsRequestIdentifier" = betterproto.message_field(1) - # The trace id. this can be used to merge together a streaming trace. Note - # that the trace_id is not guaranteed to be spatially or temporally unique. - trace_id: int = betterproto.uint64_field(2) - # The trace data. - trace: "___data_tap_v3__.TraceWrapper" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class StreamTapsRequestIdentifier(betterproto.Message): - # The node sending taps over the stream. - node: "___config_core_v3__.Node" = betterproto.message_field(1) - # The opaque identifier that was set in the :ref:`output config - # `. - tap_id: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class StreamTapsResponse(betterproto.Message): - """[#not-implemented-hide:]""" - - pass - - -class TapSinkServiceStub(betterproto.ServiceStub): - async def stream_taps( - self, - request_iterator: Union[ - AsyncIterable["StreamTapsRequest"], Iterable["StreamTapsRequest"] - ], - ) -> "StreamTapsResponse": - - return await self._stream_unary( - "/envoy.service.tap.v3.TapSinkService/StreamTaps", - request_iterator, - StreamTapsRequest, - StreamTapsResponse, - ) - - -class TapSinkServiceBase(ServiceBase): - async def stream_taps( - self, request_iterator: AsyncIterator["StreamTapsRequest"] - ) -> "StreamTapsResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_taps(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - response = await self.stream_taps(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.tap.v3.TapSinkService/StreamTaps": grpclib.const.Handler( - self.__rpc_stream_taps, - grpclib.const.Cardinality.STREAM_UNARY, - StreamTapsRequest, - StreamTapsResponse, - ), - } - - -from ....config.core import v3 as ___config_core_v3__ -from ....data.tap import v3 as ___data_tap_v3__ diff --git a/src/envoy_data_plane/envoy/service/trace/__init__.py b/src/envoy_data_plane/envoy/service/trace/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/service/trace/v2/__init__.py b/src/envoy_data_plane/envoy/service/trace/v2/__init__.py deleted file mode 100644 index 58e0477..0000000 --- a/src/envoy_data_plane/envoy/service/trace/v2/__init__.py +++ /dev/null @@ -1,72 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/trace/v2/trace_service.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class StreamTracesResponse(betterproto.Message): - pass - - -@dataclass(eq=False, repr=False) -class StreamTracesMessage(betterproto.Message): - # Identifier data effectively is a structured metadata. As a performance - # optimization this will only be sent in the first message on the stream. 
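The v3 TapSinkService above is stream-unary: a client sends many StreamTapsRequest messages and receives a single empty StreamTapsResponse. A bare-bones sketch that sends placeholder requests carrying only a trace_id; a real tap client would also attach the identifier and TraceWrapper payloads.

import asyncio

from grpclib.client import Channel

from envoy_data_plane.envoy.service.tap.v3 import StreamTapsRequest, TapSinkServiceStub


async def main() -> None:
    channel = Channel(host="127.0.0.1", port=9999)  # placeholder tap-sink address
    try:
        stub = TapSinkServiceStub(channel)
        # stream_taps() accepts any (async) iterable of requests.
        requests = [StreamTapsRequest(trace_id=i) for i in range(3)]
        await stub.stream_taps(requests)
    finally:
        channel.close()


asyncio.run(main())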
- identifier: "StreamTracesMessageIdentifier" = betterproto.message_field(1) - # A list of Span entries - spans: List["____opencensus_proto_trace_v1__.Span"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class StreamTracesMessageIdentifier(betterproto.Message): - # The node sending the access log messages over the stream. - node: "___api_v2_core__.Node" = betterproto.message_field(1) - - -class TraceServiceStub(betterproto.ServiceStub): - async def stream_traces( - self, - request_iterator: Union[ - AsyncIterable["StreamTracesMessage"], Iterable["StreamTracesMessage"] - ], - ) -> "StreamTracesResponse": - - return await self._stream_unary( - "/envoy.service.trace.v2.TraceService/StreamTraces", - request_iterator, - StreamTracesMessage, - StreamTracesResponse, - ) - - -class TraceServiceBase(ServiceBase): - async def stream_traces( - self, request_iterator: AsyncIterator["StreamTracesMessage"] - ) -> "StreamTracesResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_traces(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - response = await self.stream_traces(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.trace.v2.TraceService/StreamTraces": grpclib.const.Handler( - self.__rpc_stream_traces, - grpclib.const.Cardinality.STREAM_UNARY, - StreamTracesMessage, - StreamTracesResponse, - ), - } - - -from .....opencensus.proto.trace import v1 as ____opencensus_proto_trace_v1__ -from ....api.v2 import core as ___api_v2_core__ diff --git a/src/envoy_data_plane/envoy/service/trace/v3/__init__.py b/src/envoy_data_plane/envoy/service/trace/v3/__init__.py deleted file mode 100644 index 3639c74..0000000 --- a/src/envoy_data_plane/envoy/service/trace/v3/__init__.py +++ /dev/null @@ -1,72 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/service/trace/v3/trace_service.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import AsyncIterable, AsyncIterator, Dict, Iterable, List, Union - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase -import grpclib - - -@dataclass(eq=False, repr=False) -class StreamTracesResponse(betterproto.Message): - pass - - -@dataclass(eq=False, repr=False) -class StreamTracesMessage(betterproto.Message): - # Identifier data effectively is a structured metadata. As a performance - # optimization this will only be sent in the first message on the stream. - identifier: "StreamTracesMessageIdentifier" = betterproto.message_field(1) - # A list of Span entries - spans: List["____opencensus_proto_trace_v1__.Span"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class StreamTracesMessageIdentifier(betterproto.Message): - # The node sending the access log messages over the stream. 
- node: "___config_core_v3__.Node" = betterproto.message_field(1) - - -class TraceServiceStub(betterproto.ServiceStub): - async def stream_traces( - self, - request_iterator: Union[ - AsyncIterable["StreamTracesMessage"], Iterable["StreamTracesMessage"] - ], - ) -> "StreamTracesResponse": - - return await self._stream_unary( - "/envoy.service.trace.v3.TraceService/StreamTraces", - request_iterator, - StreamTracesMessage, - StreamTracesResponse, - ) - - -class TraceServiceBase(ServiceBase): - async def stream_traces( - self, request_iterator: AsyncIterator["StreamTracesMessage"] - ) -> "StreamTracesResponse": - raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) - - async def __rpc_stream_traces(self, stream: grpclib.server.Stream) -> None: - request_kwargs = {"request_iterator": stream.__aiter__()} - - response = await self.stream_traces(**request_kwargs) - await stream.send_message(response) - - def __mapping__(self) -> Dict[str, grpclib.const.Handler]: - return { - "/envoy.service.trace.v3.TraceService/StreamTraces": grpclib.const.Handler( - self.__rpc_stream_traces, - grpclib.const.Cardinality.STREAM_UNARY, - StreamTracesMessage, - StreamTracesResponse, - ), - } - - -from .....opencensus.proto.trace import v1 as ____opencensus_proto_trace_v1__ -from ....config.core import v3 as ___config_core_v3__ diff --git a/src/envoy_data_plane/envoy/type/__init__.py b/src/envoy_data_plane/envoy/type/__init__.py deleted file mode 100644 index 143ff4a..0000000 --- a/src/envoy_data_plane/envoy/type/__init__.py +++ /dev/null @@ -1,213 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/type/hash_policy.proto, envoy/type/http.proto, envoy/type/http_status.proto, envoy/type/percent.proto, envoy/type/range.proto, envoy/type/semantic_version.proto, envoy/type/token_bucket.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class FractionalPercentDenominatorType(betterproto.Enum): - HUNDRED = 0 - TEN_THOUSAND = 1 - MILLION = 2 - - -class CodecClientType(betterproto.Enum): - HTTP1 = 0 - HTTP2 = 1 - # [#not-implemented-hide:] QUIC implementation is not production ready yet. - # Use this enum with caution to prevent accidental execution of QUIC code. - # I.e. `!= HTTP2` is no longer sufficient to distinguish HTTP1 and HTTP2 - # traffic. - HTTP3 = 2 - - -class StatusCode(betterproto.Enum): - """ - HTTP response codes supported in Envoy. For more details: - https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml - """ - - # Empty - This code not part of the HTTP status code specification, but it is - # needed for proto `enum` type. 
- Empty = 0 - Continue = 100 - OK = 200 - Created = 201 - Accepted = 202 - NonAuthoritativeInformation = 203 - NoContent = 204 - ResetContent = 205 - PartialContent = 206 - MultiStatus = 207 - AlreadyReported = 208 - IMUsed = 226 - MultipleChoices = 300 - MovedPermanently = 301 - Found = 302 - SeeOther = 303 - NotModified = 304 - UseProxy = 305 - TemporaryRedirect = 307 - PermanentRedirect = 308 - BadRequest = 400 - Unauthorized = 401 - PaymentRequired = 402 - Forbidden = 403 - NotFound = 404 - MethodNotAllowed = 405 - NotAcceptable = 406 - ProxyAuthenticationRequired = 407 - RequestTimeout = 408 - Conflict = 409 - Gone = 410 - LengthRequired = 411 - PreconditionFailed = 412 - PayloadTooLarge = 413 - URITooLong = 414 - UnsupportedMediaType = 415 - RangeNotSatisfiable = 416 - ExpectationFailed = 417 - MisdirectedRequest = 421 - UnprocessableEntity = 422 - Locked = 423 - FailedDependency = 424 - UpgradeRequired = 426 - PreconditionRequired = 428 - TooManyRequests = 429 - RequestHeaderFieldsTooLarge = 431 - InternalServerError = 500 - NotImplemented = 501 - BadGateway = 502 - ServiceUnavailable = 503 - GatewayTimeout = 504 - HTTPVersionNotSupported = 505 - VariantAlsoNegotiates = 506 - InsufficientStorage = 507 - LoopDetected = 508 - NotExtended = 510 - NetworkAuthenticationRequired = 511 - - -@dataclass(eq=False, repr=False) -class Percent(betterproto.Message): - """Identifies a percentage, in the range [0.0, 100.0].""" - - value: float = betterproto.double_field(1) - - -@dataclass(eq=False, repr=False) -class FractionalPercent(betterproto.Message): - """ - A fractional percentage is used in cases in which for performance reasons - performing floating point to integer conversions during randomness - calculations is undesirable. The message includes both a numerator and - denominator that together determine the final fractional value. * - **Example**: 1/100 = 1%. * **Example**: 3/10000 = 0.03%. - """ - - # Specifies the numerator. Defaults to 0. - numerator: int = betterproto.uint32_field(1) - # Specifies the denominator. If the denominator specified is less than the - # numerator, the final fractional percentage is capped at 1 (100%). - denominator: "FractionalPercentDenominatorType" = betterproto.enum_field(2) - - -@dataclass(eq=False, repr=False) -class SemanticVersion(betterproto.Message): - """ - Envoy uses SemVer (https://semver.org/). Major/minor versions indicate - expected behaviors and APIs, the patch version field is used only for - security fixes and can be generally ignored. - """ - - major_number: int = betterproto.uint32_field(1) - minor_number: int = betterproto.uint32_field(2) - patch: int = betterproto.uint32_field(3) - - -@dataclass(eq=False, repr=False) -class Int64Range(betterproto.Message): - """ - Specifies the int64 start and end of the range using half-open interval - semantics [start, end). - """ - - # start of the range (inclusive) - start: int = betterproto.int64_field(1) - # end of the range (exclusive) - end: int = betterproto.int64_field(2) - - -@dataclass(eq=False, repr=False) -class Int32Range(betterproto.Message): - """ - Specifies the int32 start and end of the range using half-open interval - semantics [start, end). - """ - - # start of the range (inclusive) - start: int = betterproto.int32_field(1) - # end of the range (exclusive) - end: int = betterproto.int32_field(2) - - -@dataclass(eq=False, repr=False) -class DoubleRange(betterproto.Message): - """ - Specifies the double start and end of the range using half-open interval - semantics [start, end). 
- """ - - # start of the range (inclusive) - start: float = betterproto.double_field(1) - # end of the range (exclusive) - end: float = betterproto.double_field(2) - - -@dataclass(eq=False, repr=False) -class HashPolicy(betterproto.Message): - """Specifies the hash policy""" - - source_ip: "HashPolicySourceIp" = betterproto.message_field( - 1, group="policy_specifier" - ) - - -@dataclass(eq=False, repr=False) -class HashPolicySourceIp(betterproto.Message): - """ - The source IP will be used to compute the hash used by hash-based load - balancing algorithms. - """ - - pass - - -@dataclass(eq=False, repr=False) -class TokenBucket(betterproto.Message): - """Configures a token bucket, typically used for rate limiting.""" - - # The maximum tokens that the bucket can hold. This is also the number of - # tokens that the bucket initially contains. - max_tokens: int = betterproto.uint32_field(1) - # The number of tokens added to the bucket during each fill interval. If not - # specified, defaults to a single token. - tokens_per_fill: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # The fill interval that tokens are added to the bucket. During each fill - # interval `tokens_per_fill` are added to the bucket. The bucket will never - # contain more than `max_tokens` tokens. - fill_interval: timedelta = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class HttpStatus(betterproto.Message): - """HTTP status.""" - - # Supplies HTTP response code. - code: "StatusCode" = betterproto.enum_field(1) diff --git a/src/envoy_data_plane/envoy/type/http/__init__.py b/src/envoy_data_plane/envoy/type/http/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/type/http/v3/__init__.py b/src/envoy_data_plane/envoy/type/http/v3/__init__.py deleted file mode 100644 index a666248..0000000 --- a/src/envoy_data_plane/envoy/type/http/v3/__init__.py +++ /dev/null @@ -1,77 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/type/http/v3/cookie.proto, envoy/type/http/v3/path_transformation.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class PathTransformation(betterproto.Message): - # A list of operations to apply. Transformations will be performed in the - # order that they appear. - operations: List["PathTransformationOperation"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class PathTransformationOperation(betterproto.Message): - """A type of operation to alter text.""" - - # Enable path normalization per RFC 3986. - normalize_path_rfc_3986: "PathTransformationOperationNormalizePathRfc3986" = ( - betterproto.message_field(2, group="operation_specifier") - ) - # Enable merging adjacent slashes. - merge_slashes: "PathTransformationOperationMergeSlashes" = ( - betterproto.message_field(3, group="operation_specifier") - ) - - -@dataclass(eq=False, repr=False) -class PathTransformationOperationNormalizePathRfc3986(betterproto.Message): - """ - Should text be normalized according to RFC 3986? This typically is used for - path headers before any processing of requests by HTTP filters or routing. - This applies percent-encoded normalization and path segment normalization. - Fails on characters disallowed in URLs (e.g. NULLs). 
See `Normalization and - Comparison `_ for details of - normalization. Note that this options does not perform `case normalization - `_ - """ - - pass - - -@dataclass(eq=False, repr=False) -class PathTransformationOperationMergeSlashes(betterproto.Message): - """ - Determines if adjacent slashes are merged into one. A common use case is - for a request path header. Using this option in `:ref: - PathNormalizationOptions ` will - allow incoming requests with path `//dir///file` to match against route - with `prefix` match set to `/dir`. When using for header transformations, - note that slash merging is not part of `HTTP spec - `_ and is provided for convenience. - """ - - pass - - -@dataclass(eq=False, repr=False) -class Cookie(betterproto.Message): - """Cookie defines an API for obtaining or generating HTTP cookie.""" - - # The name that will be used to obtain cookie value from downstream HTTP - # request or generate new cookie for downstream. - name: str = betterproto.string_field(1) - # Duration of cookie. This will be used to set the expiry time of a new - # cookie when it is generated. Set this to 0 to use a session cookie. - ttl: timedelta = betterproto.message_field(2) - # Path of cookie. This will be used to set the path of a new cookie when it - # is generated. If no path is specified here, no path will be set for the - # cookie. - path: str = betterproto.string_field(3) diff --git a/src/envoy_data_plane/envoy/type/matcher/__init__.py b/src/envoy_data_plane/envoy/type/matcher/__init__.py deleted file mode 100644 index 2af4d7b..0000000 --- a/src/envoy_data_plane/envoy/type/matcher/__init__.py +++ /dev/null @@ -1,280 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/type/matcher/metadata.proto, envoy/type/matcher/node.proto, envoy/type/matcher/number.proto, envoy/type/matcher/path.proto, envoy/type/matcher/regex.proto, envoy/type/matcher/string.proto, envoy/type/matcher/struct.proto, envoy/type/matcher/value.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RegexMatcher(betterproto.Message): - """A regex matcher designed for safety when used with untrusted input.""" - - # Google's RE2 regex engine. - google_re2: "RegexMatcherGoogleRe2" = betterproto.message_field( - 1, group="engine_type" - ) - # The regex match string. The string must be supported by the configured - # engine. - regex: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class RegexMatcherGoogleRe2(betterproto.Message): - """ - Google's `RE2 `_ regex engine. The regex - string must adhere to the documented `syntax - `_. The engine is designed to - complete execution in linear time as well as limit the amount of memory - used. Envoy supports program size checking via runtime. The runtime keys - `re2.max_program_size.error_level` and `re2.max_program_size.warn_level` - can be set to integers as the maximum program size or complexity that a - compiled regex can have before an exception is thrown or a warning is - logged, respectively. `re2.max_program_size.error_level` defaults to 100, - and `re2.max_program_size.warn_level` has no default if unset (will not - check/log a warning). 
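A small construction sketch for the envoy.type.http.v3 messages shown above: a PathTransformation that normalizes per RFC 3986 and then merges slashes, plus a session Cookie. Names and values are examples only.

from datetime import timedelta

from envoy_data_plane.envoy.type.http.v3 import (
    Cookie,
    PathTransformation,
    PathTransformationOperation,
    PathTransformationOperationMergeSlashes,
    PathTransformationOperationNormalizePathRfc3986,
)

# Normalize the path per RFC 3986, then collapse duplicate slashes.
transform = PathTransformation(
    operations=[
        PathTransformationOperation(
            normalize_path_rfc_3986=PathTransformationOperationNormalizePathRfc3986()
        ),
        PathTransformationOperation(
            merge_slashes=PathTransformationOperationMergeSlashes()
        ),
    ]
)

# Session cookie (ttl of zero) scoped to the whole virtual host.
cookie = Cookie(name="session-affinity", ttl=timedelta(0), path="/")

print(transform.to_dict(), cookie.to_dict())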
Envoy emits two stats for tracking the program size - of regexes: the histogram `re2.program_size`, which records the program - size, and the counter `re2.exceeded_warn_level`, which is incremented each - time the program size exceeds the warn level threshold. - """ - - # This field controls the RE2 "program size" which is a rough estimate of how - # complex a compiled regex is to evaluate. A regex that has a program size - # greater than the configured value will fail to compile. In this case, the - # configured max program size can be increased or the regex can be - # simplified. If not specified, the default is 100. This field is deprecated; - # regexp validation should be performed on the management server instead of - # being done by each individual client. - max_program_size: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.max_program_size: - warnings.warn( - "RegexMatcherGoogleRe2.max_program_size is deprecated", - DeprecationWarning, - ) - - -@dataclass(eq=False, repr=False) -class RegexMatchAndSubstitute(betterproto.Message): - """ - Describes how to match a string and then produce a new string using a - regular expression and a substitution string. - """ - - # The regular expression used to find portions of a string (hereafter called - # the "subject string") that should be replaced. When a new string is - # produced during the substitution operation, the new string is initially the - # same as the subject string, but then all matches in the subject string are - # replaced by the substitution string. If replacing all matches isn't - # desired, regular expression anchors can be used to ensure a single match, - # so as to replace just one occurrence of a pattern. Capture groups can be - # used in the pattern to extract portions of the subject string, and then - # referenced in the substitution string. - pattern: "RegexMatcher" = betterproto.message_field(1) - # The string that should be substituted into matching portions of the subject - # string during a substitution operation to produce a new string. Capture - # groups in the pattern can be referenced in the substitution string. Note, - # however, that the syntax for referring to capture groups is defined by the - # chosen regular expression engine. Google's `RE2 - # `_ regular expression engine uses a - # backslash followed by the capture group number to denote a numbered capture - # group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers to capture - # group 2. - substitution: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class StringMatcher(betterproto.Message): - """Specifies the way to match a string. [#next-free-field: 7]""" - - # The input string must match exactly the string specified here. Examples: * - # *abc* only matches the value *abc*. - exact: str = betterproto.string_field(1, group="match_pattern") - # The input string must have the prefix specified here. Note: empty prefix is - # not allowed, please use regex instead. Examples: * *abc* matches the value - # *abc.xyz* - prefix: str = betterproto.string_field(2, group="match_pattern") - # The input string must have the suffix specified here. Note: empty prefix is - # not allowed, please use regex instead. Examples: * *abc* matches the value - # *xyz.abc* - suffix: str = betterproto.string_field(3, group="match_pattern") - # The input string must match the regular expression specified here. 
The - # regex grammar is defined `here - # `_. Examples: * The - # regex ``\d{3}`` matches the value *123* * The regex ``\d{3}`` does not - # match the value *1234* * The regex ``\d{3}`` does not match the value - # *123.456* .. attention:: This field has been deprecated in favor of - # `safe_regex` as it is not safe for use with untrusted input in all cases. - regex: str = betterproto.string_field(4, group="match_pattern") - # The input string must match the regular expression specified here. - safe_regex: "RegexMatcher" = betterproto.message_field(5, group="match_pattern") - # If true, indicates the exact/prefix/suffix matching should be case - # insensitive. This has no effect for the safe_regex match. For example, the - # matcher *data* will match both input string *Data* and *data* if set to - # true. - ignore_case: bool = betterproto.bool_field(6) - - def __post_init__(self) -> None: - super().__post_init__() - if self.regex: - warnings.warn("StringMatcher.regex is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class ListStringMatcher(betterproto.Message): - """Specifies a list of ways to match a string.""" - - patterns: List["StringMatcher"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class DoubleMatcher(betterproto.Message): - """Specifies the way to match a double value.""" - - # If specified, the input double value must be in the range specified here. - # Note: The range is using half-open interval semantics [start, end). - range: "__type__.DoubleRange" = betterproto.message_field(1, group="match_pattern") - # If specified, the input double value must be equal to the value specified - # here. - exact: float = betterproto.double_field(2, group="match_pattern") - - -@dataclass(eq=False, repr=False) -class ValueMatcher(betterproto.Message): - """ - Specifies the way to match a ProtobufWkt::Value. Primitive values and - ListValue are supported. StructValue is not supported and is always not - matched. [#next-free-field: 7] - """ - - # If specified, a match occurs if and only if the target value is a - # NullValue. - null_match: "ValueMatcherNullMatch" = betterproto.message_field( - 1, group="match_pattern" - ) - # If specified, a match occurs if and only if the target value is a double - # value and is matched to this field. - double_match: "DoubleMatcher" = betterproto.message_field(2, group="match_pattern") - # If specified, a match occurs if and only if the target value is a string - # value and is matched to this field. - string_match: "StringMatcher" = betterproto.message_field(3, group="match_pattern") - # If specified, a match occurs if and only if the target value is a bool - # value and is equal to this field. - bool_match: bool = betterproto.bool_field(4, group="match_pattern") - # If specified, value match will be performed based on whether the path is - # referring to a valid primitive value in the metadata. If the path is - # referring to a non-primitive value, the result is always not matched. - present_match: bool = betterproto.bool_field(5, group="match_pattern") - # If specified, a match occurs if and only if the target value is a list - # value and is matched to this field. 
- list_match: "ListMatcher" = betterproto.message_field(6, group="match_pattern") - - -@dataclass(eq=False, repr=False) -class ValueMatcherNullMatch(betterproto.Message): - """NullMatch is an empty message to specify a null value.""" - - pass - - -@dataclass(eq=False, repr=False) -class ListMatcher(betterproto.Message): - """Specifies the way to match a list value.""" - - # If specified, at least one of the values in the list must match the value - # specified. - one_of: "ValueMatcher" = betterproto.message_field(1, group="match_pattern") - - -@dataclass(eq=False, repr=False) -class MetadataMatcher(betterproto.Message): - """[#next-major-version: MetadataMatcher should use StructMatcher]""" - - # The filter name to retrieve the Struct from the Metadata. - filter: str = betterproto.string_field(1) - # The path to retrieve the Value from the Struct. - path: List["MetadataMatcherPathSegment"] = betterproto.message_field(2) - # The MetadataMatcher is matched if the value retrieved by path is matched to - # this value. - value: "ValueMatcher" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class MetadataMatcherPathSegment(betterproto.Message): - """ - Specifies the segment in a path to retrieve value from Metadata. Note: - Currently it's not supported to retrieve a value from a list in Metadata. - This means that if the segment key refers to a list, it has to be the last - segment in a path. - """ - - # If specified, use the key to retrieve the value in a Struct. - key: str = betterproto.string_field(1, group="segment") - - -@dataclass(eq=False, repr=False) -class PathMatcher(betterproto.Message): - """Specifies the way to match a path on HTTP request.""" - - # The `path` must match the URL path portion of the :path header. The query - # and fragment string (if present) are removed in the URL path portion. For - # example, the path */data* will match the *:path* header - # */data#fragment?param=value*. - path: "StringMatcher" = betterproto.message_field(1, group="rule") - - -@dataclass(eq=False, repr=False) -class StructMatcher(betterproto.Message): - """ - StructMatcher provides a general interface to check if a given value is - matched in google.protobuf.Struct. It uses `path` to retrieve the value - from the struct and then check if it's matched to the specified value. For - example, for the following Struct: .. code-block:: yaml fields: - a: struct_value: fields: b: - struct_value: fields: c: - string_value: pro t: list_value: - values: - string_value: m - - string_value: n The following MetadataMatcher is matched as the path [a, b, - c] will retrieve a string value "pro" from the Metadata which is matched to - the specified prefix match. .. code-block:: yaml path: - key: a - - key: b - key: c value: string_match: prefix: pr The - following StructMatcher is matched as the code will match one of the string - values in the list at the path [a, t]. .. code-block:: yaml path: - - key: a - key: t value: list_match: one_of: - string_match: exact: m An example use of StructMatcher is to - match metadata in envoy.v*.core.Node. - """ - - # The path to retrieve the Value from the Struct. - path: List["StructMatcherPathSegment"] = betterproto.message_field(2) - # The StructMatcher is matched if the value retrieved by path is matched to - # this value. 
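# Illustrative sketch, not part of the deleted sources: one way the v2 matcher
# dataclasses above might be combined. The import path is assumed from the
# package layout (envoy_data_plane.envoy.type.matcher).
from envoy_data_plane.envoy.type.matcher import ListMatcher, StringMatcher, ValueMatcher

# Match a list value that contains the exact string "m", as in the
# StructMatcher docstring example above.
list_value_matcher = ValueMatcher(
    list_match=ListMatcher(
        one_of=ValueMatcher(string_match=StringMatcher(exact="m"))
    )
)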
- value: "ValueMatcher" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class StructMatcherPathSegment(betterproto.Message): - """Specifies the segment in a path to retrieve value from Struct.""" - - # If specified, use the key to retrieve the value in a Struct. - key: str = betterproto.string_field(1, group="segment") - - -@dataclass(eq=False, repr=False) -class NodeMatcher(betterproto.Message): - """Specifies the way to match a Node. The match follows AND semantics.""" - - # Specifies match criteria on the node id. - node_id: "StringMatcher" = betterproto.message_field(1) - # Specifies match criteria on the node metadata. - node_metadatas: List["StructMatcher"] = betterproto.message_field(2) - - -from ... import type as __type__ diff --git a/src/envoy_data_plane/envoy/type/matcher/v3/__init__.py b/src/envoy_data_plane/envoy/type/matcher/v3/__init__.py deleted file mode 100644 index c1a664f..0000000 --- a/src/envoy_data_plane/envoy/type/matcher/v3/__init__.py +++ /dev/null @@ -1,331 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/type/matcher/v3/http_inputs.proto, envoy/type/matcher/v3/metadata.proto, envoy/type/matcher/v3/node.proto, envoy/type/matcher/v3/number.proto, envoy/type/matcher/v3/path.proto, envoy/type/matcher/v3/regex.proto, envoy/type/matcher/v3/string.proto, envoy/type/matcher/v3/struct.proto, envoy/type/matcher/v3/value.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from typing import List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RegexMatcher(betterproto.Message): - """A regex matcher designed for safety when used with untrusted input.""" - - # Google's RE2 regex engine. - google_re2: "RegexMatcherGoogleRe2" = betterproto.message_field( - 1, group="engine_type" - ) - # The regex match string. The string must be supported by the configured - # engine. - regex: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class RegexMatcherGoogleRe2(betterproto.Message): - """ - Google's `RE2 `_ regex engine. The regex - string must adhere to the documented `syntax - `_. The engine is designed to - complete execution in linear time as well as limit the amount of memory - used. Envoy supports program size checking via runtime. The runtime keys - `re2.max_program_size.error_level` and `re2.max_program_size.warn_level` - can be set to integers as the maximum program size or complexity that a - compiled regex can have before an exception is thrown or a warning is - logged, respectively. `re2.max_program_size.error_level` defaults to 100, - and `re2.max_program_size.warn_level` has no default if unset (will not - check/log a warning). Envoy emits two stats for tracking the program size - of regexes: the histogram `re2.program_size`, which records the program - size, and the counter `re2.exceeded_warn_level`, which is incremented each - time the program size exceeds the warn level threshold. - """ - - # This field controls the RE2 "program size" which is a rough estimate of how - # complex a compiled regex is to evaluate. A regex that has a program size - # greater than the configured value will fail to compile. In this case, the - # configured max program size can be increased or the regex can be - # simplified. If not specified, the default is 100. 
This field is deprecated; - # regexp validation should be performed on the management server instead of - # being done by each individual client. .. note:: Although this field is - # deprecated, the program size will still be checked against the global - # ``re2.max_program_size.error_level`` runtime value. - max_program_size: Optional[int] = betterproto.message_field( - 1, wraps=betterproto.TYPE_UINT32 - ) - - def __post_init__(self) -> None: - super().__post_init__() - if self.max_program_size: - warnings.warn( - "RegexMatcherGoogleRe2.max_program_size is deprecated", - DeprecationWarning, - ) - - -@dataclass(eq=False, repr=False) -class RegexMatchAndSubstitute(betterproto.Message): - """ - Describes how to match a string and then produce a new string using a - regular expression and a substitution string. - """ - - # The regular expression used to find portions of a string (hereafter called - # the "subject string") that should be replaced. When a new string is - # produced during the substitution operation, the new string is initially the - # same as the subject string, but then all matches in the subject string are - # replaced by the substitution string. If replacing all matches isn't - # desired, regular expression anchors can be used to ensure a single match, - # so as to replace just one occurrence of a pattern. Capture groups can be - # used in the pattern to extract portions of the subject string, and then - # referenced in the substitution string. - pattern: "RegexMatcher" = betterproto.message_field(1) - # The string that should be substituted into matching portions of the subject - # string during a substitution operation to produce a new string. Capture - # groups in the pattern can be referenced in the substitution string. Note, - # however, that the syntax for referring to capture groups is defined by the - # chosen regular expression engine. Google's `RE2 - # `_ regular expression engine uses a - # backslash followed by the capture group number to denote a numbered capture - # group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers to capture - # group 2. - substitution: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class StringMatcher(betterproto.Message): - """Specifies the way to match a string. [#next-free-field: 8]""" - - # The input string must match exactly the string specified here. Examples: * - # *abc* only matches the value *abc*. - exact: str = betterproto.string_field(1, group="match_pattern") - # The input string must have the prefix specified here. Note: empty prefix is - # not allowed, please use regex instead. Examples: * *abc* matches the value - # *abc.xyz* - prefix: str = betterproto.string_field(2, group="match_pattern") - # The input string must have the suffix specified here. Note: empty prefix is - # not allowed, please use regex instead. Examples: * *abc* matches the value - # *xyz.abc* - suffix: str = betterproto.string_field(3, group="match_pattern") - # The input string must match the regular expression specified here. - safe_regex: "RegexMatcher" = betterproto.message_field(5, group="match_pattern") - # The input string must have the substring specified here. Note: empty - # contains match is not allowed, please use regex instead. Examples: * *abc* - # matches the value *xyz.abc.def* - contains: str = betterproto.string_field(7, group="match_pattern") - # If true, indicates the exact/prefix/suffix/contains matching should be case - # insensitive. This has no effect for the safe_regex match. 
For example, the - # matcher *data* will match both input string *Data* and *data* if set to - # true. - ignore_case: bool = betterproto.bool_field(6) - - -@dataclass(eq=False, repr=False) -class ListStringMatcher(betterproto.Message): - """Specifies a list of ways to match a string.""" - - patterns: List["StringMatcher"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class DoubleMatcher(betterproto.Message): - """Specifies the way to match a double value.""" - - # If specified, the input double value must be in the range specified here. - # Note: The range is using half-open interval semantics [start, end). - range: "__v3__.DoubleRange" = betterproto.message_field(1, group="match_pattern") - # If specified, the input double value must be equal to the value specified - # here. - exact: float = betterproto.double_field(2, group="match_pattern") - - -@dataclass(eq=False, repr=False) -class ValueMatcher(betterproto.Message): - """ - Specifies the way to match a ProtobufWkt::Value. Primitive values and - ListValue are supported. StructValue is not supported and is always not - matched. [#next-free-field: 7] - """ - - # If specified, a match occurs if and only if the target value is a - # NullValue. - null_match: "ValueMatcherNullMatch" = betterproto.message_field( - 1, group="match_pattern" - ) - # If specified, a match occurs if and only if the target value is a double - # value and is matched to this field. - double_match: "DoubleMatcher" = betterproto.message_field(2, group="match_pattern") - # If specified, a match occurs if and only if the target value is a string - # value and is matched to this field. - string_match: "StringMatcher" = betterproto.message_field(3, group="match_pattern") - # If specified, a match occurs if and only if the target value is a bool - # value and is equal to this field. - bool_match: bool = betterproto.bool_field(4, group="match_pattern") - # If specified, value match will be performed based on whether the path is - # referring to a valid primitive value in the metadata. If the path is - # referring to a non-primitive value, the result is always not matched. - present_match: bool = betterproto.bool_field(5, group="match_pattern") - # If specified, a match occurs if and only if the target value is a list - # value and is matched to this field. - list_match: "ListMatcher" = betterproto.message_field(6, group="match_pattern") - - -@dataclass(eq=False, repr=False) -class ValueMatcherNullMatch(betterproto.Message): - """NullMatch is an empty message to specify a null value.""" - - pass - - -@dataclass(eq=False, repr=False) -class ListMatcher(betterproto.Message): - """Specifies the way to match a list value.""" - - # If specified, at least one of the values in the list must match the value - # specified. - one_of: "ValueMatcher" = betterproto.message_field(1, group="match_pattern") - - -@dataclass(eq=False, repr=False) -class MetadataMatcher(betterproto.Message): - """[#next-major-version: MetadataMatcher should use StructMatcher]""" - - # The filter name to retrieve the Struct from the Metadata. - filter: str = betterproto.string_field(1) - # The path to retrieve the Value from the Struct. - path: List["MetadataMatcherPathSegment"] = betterproto.message_field(2) - # The MetadataMatcher is matched if the value retrieved by path is matched to - # this value. - value: "ValueMatcher" = betterproto.message_field(3) - # If true, the match result will be inverted. 
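# Illustrative sketch, not part of the deleted sources: constructing v3 string
# matchers with the safe RE2 engine, plus a RegexMatchAndSubstitute that swaps
# two path segments via numbered capture groups. The import path mirrors the
# deleted module (envoy_data_plane.envoy.type.matcher.v3) and is assumed to be
# available in the regenerated package.
from envoy_data_plane.envoy.type.matcher.v3 import (
    RegexMatchAndSubstitute,
    RegexMatcher,
    RegexMatcherGoogleRe2,
    StringMatcher,
)

# Case-insensitive prefix match on "/api/".
prefix_matcher = StringMatcher(prefix="/api/", ignore_case=True)

# Safe-regex match; ignore_case has no effect on this oneof branch.
version_matcher = StringMatcher(
    safe_regex=RegexMatcher(google_re2=RegexMatcherGoogleRe2(), regex=r"^/v\d+/.*$")
)

# Rewrite "/foo/bar" style paths to "/bar/foo"; RE2 refers to numbered capture
# groups as \1, \2, and so on.
swap_segments = RegexMatchAndSubstitute(
    pattern=RegexMatcher(google_re2=RegexMatcherGoogleRe2(), regex=r"^/(\w+)/(\w+)$"),
    substitution=r"/\2/\1",
)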
- invert: bool = betterproto.bool_field(4) - - -@dataclass(eq=False, repr=False) -class MetadataMatcherPathSegment(betterproto.Message): - """ - Specifies the segment in a path to retrieve value from Metadata. Note: - Currently it's not supported to retrieve a value from a list in Metadata. - This means that if the segment key refers to a list, it has to be the last - segment in a path. - """ - - # If specified, use the key to retrieve the value in a Struct. - key: str = betterproto.string_field(1, group="segment") - - -@dataclass(eq=False, repr=False) -class PathMatcher(betterproto.Message): - """Specifies the way to match a path on HTTP request.""" - - # The `path` must match the URL path portion of the :path header. The query - # and fragment string (if present) are removed in the URL path portion. For - # example, the path */data* will match the *:path* header - # */data#fragment?param=value*. - path: "StringMatcher" = betterproto.message_field(1, group="rule") - - -@dataclass(eq=False, repr=False) -class StructMatcher(betterproto.Message): - """ - StructMatcher provides a general interface to check if a given value is - matched in google.protobuf.Struct. It uses `path` to retrieve the value - from the struct and then check if it's matched to the specified value. For - example, for the following Struct: .. code-block:: yaml fields: - a: struct_value: fields: b: - struct_value: fields: c: - string_value: pro t: list_value: - values: - string_value: m - - string_value: n The following MetadataMatcher is matched as the path [a, b, - c] will retrieve a string value "pro" from the Metadata which is matched to - the specified prefix match. .. code-block:: yaml path: - key: a - - key: b - key: c value: string_match: prefix: pr The - following StructMatcher is matched as the code will match one of the string - values in the list at the path [a, t]. .. code-block:: yaml path: - - key: a - key: t value: list_match: one_of: - string_match: exact: m An example use of StructMatcher is to - match metadata in envoy.v*.core.Node. - """ - - # The path to retrieve the Value from the Struct. - path: List["StructMatcherPathSegment"] = betterproto.message_field(2) - # The StructMatcher is matched if the value retrieved by path is matched to - # this value. - value: "ValueMatcher" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class StructMatcherPathSegment(betterproto.Message): - """Specifies the segment in a path to retrieve value from Struct.""" - - # If specified, use the key to retrieve the value in a Struct. - key: str = betterproto.string_field(1, group="segment") - - -@dataclass(eq=False, repr=False) -class NodeMatcher(betterproto.Message): - """Specifies the way to match a Node. The match follows AND semantics.""" - - # Specifies match criteria on the node id. - node_id: "StringMatcher" = betterproto.message_field(1) - # Specifies match criteria on the node metadata. - node_metadatas: List["StructMatcher"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class HttpRequestHeaderMatchInput(betterproto.Message): - """ - Match input indicates that matching should be done on a specific request - header. The resulting input string will be all headers for the given key - joined by a comma, e.g. if the request contains two 'foo' headers with - value 'bar' and 'baz', the input string will be 'bar,baz'. - [#comment:TODO(snowp): Link to unified matching docs.] - """ - - # The request header to match on. 
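# Illustrative sketch, not part of the deleted sources: a v3 StructMatcher
# equivalent to the YAML example in the docstring above, matching the value at
# path [a, b, c] against a string prefix of "pr". Import path assumed as above.
from envoy_data_plane.envoy.type.matcher.v3 import (
    StringMatcher,
    StructMatcher,
    StructMatcherPathSegment,
    ValueMatcher,
)

prefix_struct_matcher = StructMatcher(
    path=[
        StructMatcherPathSegment(key="a"),
        StructMatcherPathSegment(key="b"),
        StructMatcherPathSegment(key="c"),
    ],
    value=ValueMatcher(string_match=StringMatcher(prefix="pr")),
)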
- header_name: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class HttpRequestTrailerMatchInput(betterproto.Message): - """ - Match input indicates that matching should be done on a specific request - trailer. The resulting input string will be all headers for the given key - joined by a comma, e.g. if the request contains two 'foo' headers with - value 'bar' and 'baz', the input string will be 'bar,baz'. - [#comment:TODO(snowp): Link to unified matching docs.] - """ - - # The request trailer to match on. - header_name: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class HttpResponseHeaderMatchInput(betterproto.Message): - """ - Match input indicating that matching should be done on a specific response - header. The resulting input string will be all headers for the given key - joined by a comma, e.g. if the response contains two 'foo' headers with - value 'bar' and 'baz', the input string will be 'bar,baz'. - [#comment:TODO(snowp): Link to unified matching docs.] - """ - - # The response header to match on. - header_name: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class HttpResponseTrailerMatchInput(betterproto.Message): - """ - Match input indicates that matching should be done on a specific response - trailer. The resulting input string will be all headers for the given key - joined by a comma, e.g. if the request contains two 'foo' headers with - value 'bar' and 'baz', the input string will be 'bar,baz'. - [#comment:TODO(snowp): Link to unified matching docs.] - """ - - # The response trailer to match on. - header_name: str = betterproto.string_field(1) - - -from ... import v3 as __v3__ diff --git a/src/envoy_data_plane/envoy/type/metadata/__init__.py b/src/envoy_data_plane/envoy/type/metadata/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/type/metadata/v2/__init__.py b/src/envoy_data_plane/envoy/type/metadata/v2/__init__.py deleted file mode 100644 index af7bbf3..0000000 --- a/src/envoy_data_plane/envoy/type/metadata/v2/__init__.py +++ /dev/null @@ -1,94 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/type/metadata/v2/metadata.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class MetadataKey(betterproto.Message): - """ - MetadataKey provides a general interface using `key` and `path` to retrieve - value from :ref:`Metadata `. For example, for - the following Metadata: .. code-block:: yaml filter_metadata: - envoy.xxx: prop: foo: bar xyz: hello: - envoy The following MetadataKey will retrieve a string value "bar" from the - Metadata. .. code-block:: yaml key: envoy.xxx path: - key: prop - - key: foo - """ - - # The key name of Metadata to retrieve the Struct from the metadata. - # Typically, it represents a builtin subsystem or custom extension. - key: str = betterproto.string_field(1) - # The path to retrieve the Value from the Struct. It can be a prefix or a - # full path, e.g. ``[prop, xyz]`` for a struct or ``[prop, foo]`` for a - # string in the example, which depends on the particular scenario. Note: Due - # to that only the key type segment is supported, the path can not specify a - # list unless the list is the last segment. 
- path: List["MetadataKeyPathSegment"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class MetadataKeyPathSegment(betterproto.Message): - """ - Specifies the segment in a path to retrieve value from Metadata. Currently - it is only supported to specify the key, i.e. field name, as one segment of - a path. - """ - - # If specified, use the key to retrieve the value in a Struct. - key: str = betterproto.string_field(1, group="segment") - - -@dataclass(eq=False, repr=False) -class MetadataKind(betterproto.Message): - """Describes what kind of metadata.""" - - # Request kind of metadata. - request: "MetadataKindRequest" = betterproto.message_field(1, group="kind") - # Route kind of metadata. - route: "MetadataKindRoute" = betterproto.message_field(2, group="kind") - # Cluster kind of metadata. - cluster: "MetadataKindCluster" = betterproto.message_field(3, group="kind") - # Host kind of metadata. - host: "MetadataKindHost" = betterproto.message_field(4, group="kind") - - -@dataclass(eq=False, repr=False) -class MetadataKindRequest(betterproto.Message): - """Represents dynamic metadata associated with the request.""" - - pass - - -@dataclass(eq=False, repr=False) -class MetadataKindRoute(betterproto.Message): - """ - Represents metadata from :ref:`the - route`. - """ - - pass - - -@dataclass(eq=False, repr=False) -class MetadataKindCluster(betterproto.Message): - """ - Represents metadata from :ref:`the upstream - cluster`. - """ - - pass - - -@dataclass(eq=False, repr=False) -class MetadataKindHost(betterproto.Message): - """ - Represents metadata from :ref:`the upstream - host`. - """ - - pass diff --git a/src/envoy_data_plane/envoy/type/metadata/v3/__init__.py b/src/envoy_data_plane/envoy/type/metadata/v3/__init__.py deleted file mode 100644 index 2a3929e..0000000 --- a/src/envoy_data_plane/envoy/type/metadata/v3/__init__.py +++ /dev/null @@ -1,94 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/type/metadata/v3/metadata.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class MetadataKey(betterproto.Message): - """ - MetadataKey provides a general interface using `key` and `path` to retrieve - value from :ref:`Metadata `. For - example, for the following Metadata: .. code-block:: yaml - filter_metadata: envoy.xxx: prop: foo: bar - xyz: hello: envoy The following MetadataKey will retrieve a - string value "bar" from the Metadata. .. code-block:: yaml key: - envoy.xxx path: - key: prop - key: foo - """ - - # The key name of Metadata to retrieve the Struct from the metadata. - # Typically, it represents a builtin subsystem or custom extension. - key: str = betterproto.string_field(1) - # The path to retrieve the Value from the Struct. It can be a prefix or a - # full path, e.g. ``[prop, xyz]`` for a struct or ``[prop, foo]`` for a - # string in the example, which depends on the particular scenario. Note: Due - # to that only the key type segment is supported, the path can not specify a - # list unless the list is the last segment. - path: List["MetadataKeyPathSegment"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class MetadataKeyPathSegment(betterproto.Message): - """ - Specifies the segment in a path to retrieve value from Metadata. Currently - it is only supported to specify the key, i.e. field name, as one segment of - a path. 
- """ - - # If specified, use the key to retrieve the value in a Struct. - key: str = betterproto.string_field(1, group="segment") - - -@dataclass(eq=False, repr=False) -class MetadataKind(betterproto.Message): - """Describes what kind of metadata.""" - - # Request kind of metadata. - request: "MetadataKindRequest" = betterproto.message_field(1, group="kind") - # Route kind of metadata. - route: "MetadataKindRoute" = betterproto.message_field(2, group="kind") - # Cluster kind of metadata. - cluster: "MetadataKindCluster" = betterproto.message_field(3, group="kind") - # Host kind of metadata. - host: "MetadataKindHost" = betterproto.message_field(4, group="kind") - - -@dataclass(eq=False, repr=False) -class MetadataKindRequest(betterproto.Message): - """Represents dynamic metadata associated with the request.""" - - pass - - -@dataclass(eq=False, repr=False) -class MetadataKindRoute(betterproto.Message): - """ - Represents metadata from :ref:`the - route`. - """ - - pass - - -@dataclass(eq=False, repr=False) -class MetadataKindCluster(betterproto.Message): - """ - Represents metadata from :ref:`the upstream - cluster`. - """ - - pass - - -@dataclass(eq=False, repr=False) -class MetadataKindHost(betterproto.Message): - """ - Represents metadata from :ref:`the upstream - host`. - """ - - pass diff --git a/src/envoy_data_plane/envoy/type/tracing/__init__.py b/src/envoy_data_plane/envoy/type/tracing/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/type/tracing/v2/__init__.py b/src/envoy_data_plane/envoy/type/tracing/v2/__init__.py deleted file mode 100644 index 86a4ba2..0000000 --- a/src/envoy_data_plane/envoy/type/tracing/v2/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/type/tracing/v2/custom_tag.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class CustomTag(betterproto.Message): - """Describes custom tags for the active span. [#next-free-field: 6]""" - - # Used to populate the tag name. - tag: str = betterproto.string_field(1) - # A literal custom tag. - literal: "CustomTagLiteral" = betterproto.message_field(2, group="type") - # An environment custom tag. - environment: "CustomTagEnvironment" = betterproto.message_field(3, group="type") - # A request header custom tag. - request_header: "CustomTagHeader" = betterproto.message_field(4, group="type") - # A custom tag to obtain tag value from the metadata. - metadata: "CustomTagMetadata" = betterproto.message_field(5, group="type") - - -@dataclass(eq=False, repr=False) -class CustomTagLiteral(betterproto.Message): - """Literal type custom tag with static value for the tag value.""" - - # Static literal value to populate the tag value. - value: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class CustomTagEnvironment(betterproto.Message): - """Environment type custom tag with environment name and default value.""" - - # Environment variable name to obtain the value to populate the tag value. - name: str = betterproto.string_field(1) - # When the environment variable is not found, the tag value will be populated - # with this default value if specified, otherwise no tag will be populated. 
- default_value: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class CustomTagHeader(betterproto.Message): - """Header type custom tag with header name and default value.""" - - # Header name to obtain the value to populate the tag value. - name: str = betterproto.string_field(1) - # When the header does not exist, the tag value will be populated with this - # default value if specified, otherwise no tag will be populated. - default_value: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class CustomTagMetadata(betterproto.Message): - """ - Metadata type custom tag using :ref:`MetadataKey - ` to retrieve the protobuf - value from :ref:`Metadata `, and populate the - tag value with `the canonical JSON `_ representation of it. - """ - - # Specify what kind of metadata to obtain tag value from. - kind: "__metadata_v2__.MetadataKind" = betterproto.message_field(1) - # Metadata key to define the path to retrieve the tag value. - metadata_key: "__metadata_v2__.MetadataKey" = betterproto.message_field(2) - # When no valid metadata is found, the tag value would be populated with this - # default value if specified, otherwise no tag would be populated. - default_value: str = betterproto.string_field(3) - - -from ...metadata import v2 as __metadata_v2__ diff --git a/src/envoy_data_plane/envoy/type/tracing/v3/__init__.py b/src/envoy_data_plane/envoy/type/tracing/v3/__init__.py deleted file mode 100644 index e443b30..0000000 --- a/src/envoy_data_plane/envoy/type/tracing/v3/__init__.py +++ /dev/null @@ -1,76 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/type/tracing/v3/custom_tag.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class CustomTag(betterproto.Message): - """Describes custom tags for the active span. [#next-free-field: 6]""" - - # Used to populate the tag name. - tag: str = betterproto.string_field(1) - # A literal custom tag. - literal: "CustomTagLiteral" = betterproto.message_field(2, group="type") - # An environment custom tag. - environment: "CustomTagEnvironment" = betterproto.message_field(3, group="type") - # A request header custom tag. - request_header: "CustomTagHeader" = betterproto.message_field(4, group="type") - # A custom tag to obtain tag value from the metadata. - metadata: "CustomTagMetadata" = betterproto.message_field(5, group="type") - - -@dataclass(eq=False, repr=False) -class CustomTagLiteral(betterproto.Message): - """Literal type custom tag with static value for the tag value.""" - - # Static literal value to populate the tag value. - value: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class CustomTagEnvironment(betterproto.Message): - """Environment type custom tag with environment name and default value.""" - - # Environment variable name to obtain the value to populate the tag value. - name: str = betterproto.string_field(1) - # When the environment variable is not found, the tag value will be populated - # with this default value if specified, otherwise no tag will be populated. - default_value: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class CustomTagHeader(betterproto.Message): - """Header type custom tag with header name and default value.""" - - # Header name to obtain the value to populate the tag value. 
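# Illustrative sketch, not part of the deleted sources: two v3 custom span tags,
# one literal and one taken from an environment variable with a fallback. The
# tag names and values here are made up; the import path mirrors the deleted
# module (envoy_data_plane.envoy.type.tracing.v3) and is assumed.
from envoy_data_plane.envoy.type.tracing.v3 import (
    CustomTag,
    CustomTagEnvironment,
    CustomTagLiteral,
)

team_tag = CustomTag(tag="team", literal=CustomTagLiteral(value="edge-proxy"))

env_tag = CustomTag(
    tag="deploy_env",
    environment=CustomTagEnvironment(name="DEPLOY_ENV", default_value="unknown"),
)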
- name: str = betterproto.string_field(1) - # When the header does not exist, the tag value will be populated with this - # default value if specified, otherwise no tag will be populated. - default_value: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class CustomTagMetadata(betterproto.Message): - """ - Metadata type custom tag using :ref:`MetadataKey - ` to retrieve the protobuf - value from :ref:`Metadata `, and - populate the tag value with `the canonical JSON - `_ - representation of it. - """ - - # Specify what kind of metadata to obtain tag value from. - kind: "__metadata_v3__.MetadataKind" = betterproto.message_field(1) - # Metadata key to define the path to retrieve the tag value. - metadata_key: "__metadata_v3__.MetadataKey" = betterproto.message_field(2) - # When no valid metadata is found, the tag value would be populated with this - # default value if specified, otherwise no tag would be populated. - default_value: str = betterproto.string_field(3) - - -from ...metadata import v3 as __metadata_v3__ diff --git a/src/envoy_data_plane/envoy/type/v3/__init__.py b/src/envoy_data_plane/envoy/type/v3/__init__.py deleted file mode 100644 index c3d7b19..0000000 --- a/src/envoy_data_plane/envoy/type/v3/__init__.py +++ /dev/null @@ -1,245 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/type/v3/hash_policy.proto, envoy/type/v3/http.proto, envoy/type/v3/http_status.proto, envoy/type/v3/percent.proto, envoy/type/v3/range.proto, envoy/type/v3/ratelimit_unit.proto, envoy/type/v3/semantic_version.proto, envoy/type/v3/token_bucket.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta -from typing import Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class FractionalPercentDenominatorType(betterproto.Enum): - HUNDRED = 0 - TEN_THOUSAND = 1 - MILLION = 2 - - -class CodecClientType(betterproto.Enum): - HTTP1 = 0 - HTTP2 = 1 - # [#not-implemented-hide:] QUIC implementation is not production ready yet. - # Use this enum with caution to prevent accidental execution of QUIC code. - # I.e. `!= HTTP2` is no longer sufficient to distinguish HTTP1 and HTTP2 - # traffic. - HTTP3 = 2 - - -class RateLimitUnit(betterproto.Enum): - """Identifies the unit of of time for rate limit.""" - - # The time unit is not known. - UNKNOWN = 0 - # The time unit representing a second. - SECOND = 1 - # The time unit representing a minute. - MINUTE = 2 - # The time unit representing an hour. - HOUR = 3 - # The time unit representing a day. - DAY = 4 - - -class StatusCode(betterproto.Enum): - """ - HTTP response codes supported in Envoy. For more details: - https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml - """ - - # Empty - This code not part of the HTTP status code specification, but it is - # needed for proto `enum` type. 
- Empty = 0 - Continue = 100 - OK = 200 - Created = 201 - Accepted = 202 - NonAuthoritativeInformation = 203 - NoContent = 204 - ResetContent = 205 - PartialContent = 206 - MultiStatus = 207 - AlreadyReported = 208 - IMUsed = 226 - MultipleChoices = 300 - MovedPermanently = 301 - Found = 302 - SeeOther = 303 - NotModified = 304 - UseProxy = 305 - TemporaryRedirect = 307 - PermanentRedirect = 308 - BadRequest = 400 - Unauthorized = 401 - PaymentRequired = 402 - Forbidden = 403 - NotFound = 404 - MethodNotAllowed = 405 - NotAcceptable = 406 - ProxyAuthenticationRequired = 407 - RequestTimeout = 408 - Conflict = 409 - Gone = 410 - LengthRequired = 411 - PreconditionFailed = 412 - PayloadTooLarge = 413 - URITooLong = 414 - UnsupportedMediaType = 415 - RangeNotSatisfiable = 416 - ExpectationFailed = 417 - MisdirectedRequest = 421 - UnprocessableEntity = 422 - Locked = 423 - FailedDependency = 424 - UpgradeRequired = 426 - PreconditionRequired = 428 - TooManyRequests = 429 - RequestHeaderFieldsTooLarge = 431 - InternalServerError = 500 - NotImplemented = 501 - BadGateway = 502 - ServiceUnavailable = 503 - GatewayTimeout = 504 - HTTPVersionNotSupported = 505 - VariantAlsoNegotiates = 506 - InsufficientStorage = 507 - LoopDetected = 508 - NotExtended = 510 - NetworkAuthenticationRequired = 511 - - -@dataclass(eq=False, repr=False) -class Percent(betterproto.Message): - """Identifies a percentage, in the range [0.0, 100.0].""" - - value: float = betterproto.double_field(1) - - -@dataclass(eq=False, repr=False) -class FractionalPercent(betterproto.Message): - """ - A fractional percentage is used in cases in which for performance reasons - performing floating point to integer conversions during randomness - calculations is undesirable. The message includes both a numerator and - denominator that together determine the final fractional value. * - **Example**: 1/100 = 1%. * **Example**: 3/10000 = 0.03%. - """ - - # Specifies the numerator. Defaults to 0. - numerator: int = betterproto.uint32_field(1) - # Specifies the denominator. If the denominator specified is less than the - # numerator, the final fractional percentage is capped at 1 (100%). - denominator: "FractionalPercentDenominatorType" = betterproto.enum_field(2) - - -@dataclass(eq=False, repr=False) -class SemanticVersion(betterproto.Message): - """ - Envoy uses SemVer (https://semver.org/). Major/minor versions indicate - expected behaviors and APIs, the patch version field is used only for - security fixes and can be generally ignored. - """ - - major_number: int = betterproto.uint32_field(1) - minor_number: int = betterproto.uint32_field(2) - patch: int = betterproto.uint32_field(3) - - -@dataclass(eq=False, repr=False) -class Int64Range(betterproto.Message): - """ - Specifies the int64 start and end of the range using half-open interval - semantics [start, end). - """ - - # start of the range (inclusive) - start: int = betterproto.int64_field(1) - # end of the range (exclusive) - end: int = betterproto.int64_field(2) - - -@dataclass(eq=False, repr=False) -class Int32Range(betterproto.Message): - """ - Specifies the int32 start and end of the range using half-open interval - semantics [start, end). - """ - - # start of the range (inclusive) - start: int = betterproto.int32_field(1) - # end of the range (exclusive) - end: int = betterproto.int32_field(2) - - -@dataclass(eq=False, repr=False) -class DoubleRange(betterproto.Message): - """ - Specifies the double start and end of the range using half-open interval - semantics [start, end). 
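# Illustrative sketch, not part of the deleted sources: the 3/10000 (0.03%)
# fraction from the FractionalPercent docstring above. The import path mirrors
# the deleted envoy/type/v3 module and is assumed.
from envoy_data_plane.envoy.type.v3 import (
    FractionalPercent,
    FractionalPercentDenominatorType,
)

sample_rate = FractionalPercent(
    numerator=3,
    denominator=FractionalPercentDenominatorType.TEN_THOUSAND,
)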
- """ - - # start of the range (inclusive) - start: float = betterproto.double_field(1) - # end of the range (exclusive) - end: float = betterproto.double_field(2) - - -@dataclass(eq=False, repr=False) -class TokenBucket(betterproto.Message): - """Configures a token bucket, typically used for rate limiting.""" - - # The maximum tokens that the bucket can hold. This is also the number of - # tokens that the bucket initially contains. - max_tokens: int = betterproto.uint32_field(1) - # The number of tokens added to the bucket during each fill interval. If not - # specified, defaults to a single token. - tokens_per_fill: Optional[int] = betterproto.message_field( - 2, wraps=betterproto.TYPE_UINT32 - ) - # The fill interval that tokens are added to the bucket. During each fill - # interval `tokens_per_fill` are added to the bucket. The bucket will never - # contain more than `max_tokens` tokens. - fill_interval: timedelta = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class HashPolicy(betterproto.Message): - """Specifies the hash policy""" - - source_ip: "HashPolicySourceIp" = betterproto.message_field( - 1, group="policy_specifier" - ) - filter_state: "HashPolicyFilterState" = betterproto.message_field( - 2, group="policy_specifier" - ) - - -@dataclass(eq=False, repr=False) -class HashPolicySourceIp(betterproto.Message): - """ - The source IP will be used to compute the hash used by hash-based load - balancing algorithms. - """ - - pass - - -@dataclass(eq=False, repr=False) -class HashPolicyFilterState(betterproto.Message): - """ - An Object in the :ref:`filterState - ` will be used to compute the - hash used by hash-based load balancing algorithms. - """ - - # The name of the Object in the filterState, which is an Envoy::Hashable - # object. If there is no data associated with the key, or the stored object - # is not Envoy::Hashable, no hash will be produced. - key: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class HttpStatus(betterproto.Message): - """HTTP status.""" - - # Supplies HTTP response code. - code: "StatusCode" = betterproto.enum_field(1) diff --git a/src/envoy_data_plane/envoy/watchdog/__init__.py b/src/envoy_data_plane/envoy/watchdog/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/envoy/watchdog/v3/__init__.py b/src/envoy_data_plane/envoy/watchdog/v3/__init__.py deleted file mode 100644 index 16a12e1..0000000 --- a/src/envoy_data_plane/envoy/watchdog/v3/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: envoy/watchdog/v3/abort_action.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import timedelta - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class AbortActionConfig(betterproto.Message): - """ - A GuardDogAction that will terminate the process by killing the stuck - thread. This would allow easier access to the call stack of the stuck - thread since we would run signal handlers on that thread. By default this - will be registered to run as the last watchdog action on KILL and MULTIKILL - events if those are enabled. - """ - - # How long to wait for the thread to respond to the thread kill function - # before killing the process from this action. This is a blocking action. By - # default this is 5 seconds. 
- wait_duration: timedelta = betterproto.message_field(1) diff --git a/src/envoy_data_plane/google/__init__.py b/src/envoy_data_plane/google/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/google/api/__init__.py b/src/envoy_data_plane/google/api/__init__.py deleted file mode 100644 index a815346..0000000 --- a/src/envoy_data_plane/google/api/__init__.py +++ /dev/null @@ -1,224 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: google/api/annotations.proto, google/api/http.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Http(betterproto.Message): - """ - Defines the HTTP configuration for an API service. It contains a list of - [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC - method to one or more HTTP REST API methods. - """ - - # A list of HTTP configuration rules that apply to individual API methods. - # **NOTE:** All service configuration rules follow "last one wins" order. - rules: List["HttpRule"] = betterproto.message_field(1) - # When set to true, URL path parameters will be fully URI-decoded except in - # cases of single segment matches in reserved expansion, where "%2F" will be - # left encoded. The default behavior is to not decode RFC 6570 reserved - # characters in multi segment matches. - fully_decode_reserved_expansion: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class HttpRule(betterproto.Message): - """ - # gRPC Transcoding gRPC Transcoding is a feature for mapping between a gRPC - method and one or more HTTP REST endpoints. It allows developers to build a - single API service that supports both gRPC APIs and REST APIs. Many - systems, including [Google APIs](https://github.com/googleapis/googleapis), - [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC - Gateway](https://github.com/grpc-ecosystem/grpc-gateway), and - [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature and - use it for large scale production services. `HttpRule` defines the schema - of the gRPC/REST mapping. The mapping specifies how different portions of - the gRPC request message are mapped to the URL path, URL query parameters, - and HTTP request body. It also controls how the gRPC response message is - mapped to the HTTP response body. `HttpRule` is typically specified as an - `google.api.http` annotation on the gRPC method. Each mapping specifies a - URL path template and an HTTP method. The path template may refer to one or - more fields in the gRPC request message, as long as each field is a non- - repeated field with a primitive (non-message) type. The path template - controls how fields of the request message are mapped to the URL path. - Example: service Messaging { rpc GetMessage(GetMessageRequest) - returns (Message) { option (google.api.http) = { get: - "/v1/{name=messages/*}" }; } } message - GetMessageRequest { string name = 1; // Mapped to URL path. } - message Message { string text = 1; // The resource content. } - This enables an HTTP REST to gRPC mapping as below: HTTP | gRPC -----|----- - `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` Any - fields in the request message which are not bound by the path template - automatically become HTTP query parameters if there is no HTTP request - body. 
For example: service Messaging { rpc - GetMessage(GetMessageRequest) returns (Message) { option - (google.api.http) = { get:"/v1/messages/{message_id}" - }; } } message GetMessageRequest { message SubMessage { - string subfield = 1; } string message_id = 1; // Mapped to URL - path. int64 revision = 2; // Mapped to URL query parameter - `revision`. SubMessage sub = 3; // Mapped to URL query parameter - `sub.subfield`. } This enables a HTTP JSON to RPC mapping as below: - HTTP | gRPC -----|----- `GET - /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: - "123456" revision: 2 sub: SubMessage(subfield: "foo"))` Note that fields - which are mapped to URL query parameters must have a primitive type or a - repeated primitive type or a non-repeated message type. In the case of a - repeated type, the parameter can be repeated in the URL as - `...?param=A¶m=B`. In the case of a message type, each field of the - message is mapped to a separate parameter, such as - `...?foo.a=A&foo.b=B&foo.c=C`. For HTTP methods that allow a request body, - the `body` field specifies the mapping. Consider a REST update method on - the message resource collection: service Messaging { rpc - UpdateMessage(UpdateMessageRequest) returns (Message) { option - (google.api.http) = { patch: "/v1/messages/{message_id}" - body: "message" }; } } message UpdateMessageRequest { - string message_id = 1; // mapped to the URL Message message = 2; // - mapped to the body } The following HTTP JSON to RPC mapping is enabled, - where the representation of the JSON in the request body is determined by - protos JSON encoding: HTTP | gRPC -----|----- `PATCH /v1/messages/123456 { - "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: - "Hi!" })` The special name `*` can be used in the body mapping to define - that every field not bound by the path template should be mapped to the - request body. This enables the following alternative definition of the - update method: service Messaging { rpc UpdateMessage(Message) - returns (Message) { option (google.api.http) = { patch: - "/v1/messages/{message_id}" body: "*" }; } } - message Message { string message_id = 1; string text = 2; } - The following HTTP JSON to RPC mapping is enabled: HTTP | gRPC -----|----- - `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: - "123456" text: "Hi!")` Note that when using `*` in the body mapping, it is - not possible to have HTTP parameters, as all fields not bound by the path - end in the body. This makes this option more rarely used in practice when - defining REST APIs. The common usage of `*` is in custom methods which - don't use the URL at all for transferring data. It is possible to define - multiple HTTP methods for one RPC by using the `additional_bindings` - option. Example: service Messaging { rpc - GetMessage(GetMessageRequest) returns (Message) { option - (google.api.http) = { get: "/v1/messages/{message_id}" - additional_bindings { get: - "/v1/users/{user_id}/messages/{message_id}" } }; } - } message GetMessageRequest { string message_id = 1; string - user_id = 2; } This enables the following two alternative HTTP JSON to - RPC mappings: HTTP | gRPC -----|----- `GET /v1/messages/123456` | - `GetMessage(message_id: "123456")` `GET /v1/users/me/messages/123456` | - `GetMessage(user_id: "me" message_id: "123456")` ## Rules for HTTP mapping - 1. 
Leaf request fields (recursive expansion nested messages in the request - message) are classified into three categories: - Fields referred by the - path template. They are passed via the URL path. - Fields referred by - the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP - request body. - All other fields are passed via the URL query - parameters, and the parameter name is the field path in the request - message. A repeated field can be represented as multiple query - parameters under the same name. 2. If - [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query - parameter, all fields are passed via URL path and HTTP request body. - 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no - HTTP request body, all fields are passed via URL path and URL query - parameters. ### Path template syntax Template = "/" Segments [ Verb ] ; - Segments = Segment { "/" Segment } ; Segment = "*" | "**" | LITERAL | - Variable ; Variable = "{" FieldPath [ "=" Segments ] "}" ; - FieldPath = IDENT { "." IDENT } ; Verb = ":" LITERAL ; The syntax - `*` matches a single URL path segment. The syntax `**` matches zero or more - URL path segments, which must be the last part of the URL path except the - `Verb`. The syntax `Variable` matches part of the URL path as specified by - its template. A variable template must not contain other variables. If a - variable matches a single path segment, its template may be omitted, e.g. - `{var}` is equivalent to `{var=*}`. The syntax `LITERAL` matches literal - text in the URL path. If the `LITERAL` contains any reserved character, - such characters should be percent-encoded before the matching. If a - variable contains exactly one path segment, such as `"{var}"` or - `"{var=*}"`, when such a variable is expanded into a URL path on the client - side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The - server side does the reverse decoding. Such variables show up in the - [Discovery - Document](https://developers.google.com/discovery/v1/reference/apis) as - `{var}`. If a variable contains multiple path segments, such as - `"{var=foo/*}"` or `"{var=**}"`, when such a variable is expanded into a - URL path on the client side, all characters except `[-_.~/0-9a-zA-Z]` are - percent-encoded. The server side does the reverse decoding, except "%2F" - and "%2f" are left unchanged. Such variables show up in the [Discovery - Document](https://developers.google.com/discovery/v1/reference/apis) as - `{+var}`. ## Using gRPC API Service Configuration gRPC API Service - Configuration (service config) is a configuration language for configuring - a gRPC service to become a user-facing product. The service config is - simply the YAML representation of the `google.api.Service` proto message. - As an alternative to annotating your proto file, you can configure gRPC - transcoding in your service config YAML files. You do this by specifying a - `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same - effect as the proto annotation. This can be particularly useful if you have - a proto that is reused in multiple services. Note that any transcoding - specified in the service config will override any matching transcoding - configuration in the proto. Example: http: rules: # - Selects a gRPC method and applies HttpRule to it. 
- selector: - example.v1.Messaging.GetMessage get: - /v1/messages/{message_id}/{sub.subfield} ## Special notes When gRPC - Transcoding is used to map a gRPC to JSON REST endpoints, the proto to JSON - conversion must follow the [proto3 - specification](https://developers.google.com/protocol- - buffers/docs/proto3#json). While the single segment variable follows the - semantics of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 - Simple String Expansion, the multi segment variable **does not** follow RFC - 6570 Section 3.2.3 Reserved Expansion. The reason is that the Reserved - Expansion does not expand special characters like `?` and `#`, which would - lead to invalid URLs. As the result, gRPC Transcoding uses a custom - encoding for multi segment variables. The path variables **must not** refer - to any repeated or mapped field, because client libraries are not capable - of handling such variable expansion. The path variables **must not** - capture the leading "/" character. The reason is that the most common use - case "{var}" does not capture the leading "/" character. For consistency, - all path variables must share the same behavior. Repeated message fields - must not be mapped to URL query parameters, because no client library can - support such complicated mapping. If an API needs to use a JSON array for - request or response body, it can map the request or response body to a - repeated field. However, some gRPC Transcoding implementations may not - support this feature. - """ - - # Selects a method to which this rule applies. Refer to - # [selector][google.api.DocumentationRule.selector] for syntax details. - selector: str = betterproto.string_field(1) - # Maps to HTTP GET. Used for listing and getting information about resources. - get: str = betterproto.string_field(2, group="pattern") - # Maps to HTTP PUT. Used for replacing a resource. - put: str = betterproto.string_field(3, group="pattern") - # Maps to HTTP POST. Used for creating a resource or performing an action. - post: str = betterproto.string_field(4, group="pattern") - # Maps to HTTP DELETE. Used for deleting a resource. - delete: str = betterproto.string_field(5, group="pattern") - # Maps to HTTP PATCH. Used for updating a resource. - patch: str = betterproto.string_field(6, group="pattern") - # The custom pattern is used for specifying an HTTP method that is not - # included in the `pattern` field, such as HEAD, or "*" to leave the HTTP - # method unspecified for this rule. The wild-card rule is useful for services - # that provide content to Web (HTML) clients. - custom: "CustomHttpPattern" = betterproto.message_field(8, group="pattern") - # The name of the request field whose value is mapped to the HTTP request - # body, or `*` for mapping all request fields not captured by the path - # pattern to the HTTP body, or omitted for not having any HTTP request body. - # NOTE: the referred field must be present at the top-level of the request - # message type. - body: str = betterproto.string_field(7) - # Optional. The name of the response field whose value is mapped to the HTTP - # response body. When omitted, the entire response message will be used as - # the HTTP response body. NOTE: The referred field must be present at the - # top-level of the response message type. - response_body: str = betterproto.string_field(12) - # Additional HTTP bindings for the selector. Nested bindings must not contain - # an `additional_bindings` field themselves (that is, the nesting may only be - # one level deep). 
- additional_bindings: List["HttpRule"] = betterproto.message_field(11) - - -@dataclass(eq=False, repr=False) -class CustomHttpPattern(betterproto.Message): - """A custom pattern is used for defining custom HTTP verb.""" - - # The name of this custom HTTP verb. - kind: str = betterproto.string_field(1) - # The path matched by this custom verb. - path: str = betterproto.string_field(2) diff --git a/src/envoy_data_plane/google/api/expr/__init__.py b/src/envoy_data_plane/google/api/expr/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/google/api/expr/v1alpha1/__init__.py b/src/envoy_data_plane/google/api/expr/v1alpha1/__init__.py deleted file mode 100644 index cff332a..0000000 --- a/src/envoy_data_plane/google/api/expr/v1alpha1/__init__.py +++ /dev/null @@ -1,536 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: google/api/expr/v1alpha1/checked.proto, google/api/expr/v1alpha1/syntax.proto -# plugin: python-betterproto -import warnings -from dataclasses import dataclass -from datetime import datetime, timedelta -from typing import Dict, List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class TypePrimitiveType(betterproto.Enum): - PRIMITIVE_TYPE_UNSPECIFIED = 0 - BOOL = 1 - INT64 = 2 - UINT64 = 3 - DOUBLE = 4 - STRING = 5 - BYTES = 6 - - -class TypeWellKnownType(betterproto.Enum): - WELL_KNOWN_TYPE_UNSPECIFIED = 0 - ANY = 1 - TIMESTAMP = 2 - DURATION = 3 - - -@dataclass(eq=False, repr=False) -class ParsedExpr(betterproto.Message): - """ - An expression together with source information as returned by the parser. - """ - - # The parsed expression. - expr: "Expr" = betterproto.message_field(2) - # The source info derived from input that generated the parsed `expr`. - source_info: "SourceInfo" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class Expr(betterproto.Message): - """ - An abstract representation of a common expression. Expressions are - abstractly represented as a collection of identifiers, select statements, - function calls, literals, and comprehensions. All operators with the - exception of the '.' operator are modelled as function calls. This makes it - easy to represent new operators into the existing AST. All references - within expressions must resolve to a [Decl][google.api.expr.v1alpha1.Decl] - provided at type-check for an expression to be valid. A reference may - either be a bare identifier `name` or a qualified identifier - `google.api.name`. References may either refer to a value or a function - declaration. For example, the expression - `google.api.name.startsWith('expr')` references the declaration - `google.api.name` within a - [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression, and the - function declaration `startsWith`. - """ - - # Required. An id assigned to this node by the parser which is unique in a - # given expression tree. This is used to associate type information and other - # attributes to a node in the parse tree. - id: int = betterproto.int64_field(2) - # A literal expression. - const_expr: "Constant" = betterproto.message_field(3, group="expr_kind") - # An identifier expression. - ident_expr: "ExprIdent" = betterproto.message_field(4, group="expr_kind") - # A field selection expression, e.g. `request.auth`. - select_expr: "ExprSelect" = betterproto.message_field(5, group="expr_kind") - # A call expression, including calls to predefined functions and operators. 
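# Illustrative sketch, not part of the deleted sources: the GetMessage mapping
# from the HttpRule docstring above, exposed on two REST paths through
# additional_bindings. The import path mirrors the deleted google/api module
# and is assumed.
from envoy_data_plane.google.api import HttpRule

get_message_rule = HttpRule(
    selector="example.v1.Messaging.GetMessage",
    get="/v1/messages/{message_id}",
    additional_bindings=[
        HttpRule(get="/v1/users/{user_id}/messages/{message_id}"),
    ],
)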
- call_expr: "ExprCall" = betterproto.message_field(6, group="expr_kind") - # A list creation expression. - list_expr: "ExprCreateList" = betterproto.message_field(7, group="expr_kind") - # A map or message creation expression. - struct_expr: "ExprCreateStruct" = betterproto.message_field(8, group="expr_kind") - # A comprehension expression. - comprehension_expr: "ExprComprehension" = betterproto.message_field( - 9, group="expr_kind" - ) - - -@dataclass(eq=False, repr=False) -class ExprIdent(betterproto.Message): - """An identifier expression. e.g. `request`.""" - - # Required. Holds a single, unqualified identifier, possibly preceded by a - # '.'. Qualified names are represented by the - # [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression. - name: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class ExprSelect(betterproto.Message): - """A field selection expression. e.g. `request.auth`.""" - - # Required. The target of the selection expression. For example, in the - # select expression `request.auth`, the `request` portion of the expression - # is the `operand`. - operand: "Expr" = betterproto.message_field(1) - # Required. The name of the field to select. For example, in the select - # expression `request.auth`, the `auth` portion of the expression would be - # the `field`. - field: str = betterproto.string_field(2) - # Whether the select is to be interpreted as a field presence test. This - # results from the macro `has(request.auth)`. - test_only: bool = betterproto.bool_field(3) - - -@dataclass(eq=False, repr=False) -class ExprCall(betterproto.Message): - """ - A call expression, including calls to predefined functions and operators. - For example, `value == 10`, `size(map_value)`. - """ - - # The target of an method call-style expression. For example, `x` in `x.f()`. - target: "Expr" = betterproto.message_field(1) - # Required. The name of the function or method being called. - function: str = betterproto.string_field(2) - # The arguments. - args: List["Expr"] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class ExprCreateList(betterproto.Message): - """ - A list creation expression. Lists may either be homogenous, e.g. `[1, 2, - 3]`, or heterogeneous, e.g. `dyn([1, 'hello', 2.0])` - """ - - # The elements part of the list. - elements: List["Expr"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class ExprCreateStruct(betterproto.Message): - """ - A map or message creation expression. Maps are constructed as `{'key_name': - 'value'}`. Message construction is similar, but prefixed with a type name - and composed of field ids: `types.MyType{field_id: 'value'}`. - """ - - # The type name of the message to be created, empty when creating map - # literals. - message_name: str = betterproto.string_field(1) - # The entries in the creation expression. - entries: List["ExprCreateStructEntry"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ExprCreateStructEntry(betterproto.Message): - """Represents an entry.""" - - # Required. An id assigned to this node by the parser which is unique in a - # given expression tree. This is used to associate type information and other - # attributes to the node. - id: int = betterproto.int64_field(1) - # The field key for a message creator statement. - field_key: str = betterproto.string_field(2, group="key_kind") - # The key expression for a map creation statement. - map_key: "Expr" = betterproto.message_field(3, group="key_kind") - # Required. 
The value assigned to the key. - value: "Expr" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class ExprComprehension(betterproto.Message): - """ - A comprehension expression applied to a list or map. Comprehensions are not - part of the core syntax, but enabled with macros. A macro matches a - specific call signature within a parsed AST and replaces the call with an - alternate AST block. Macro expansion happens at parse time. The following - macros are supported within CEL: Aggregate type macros may be applied to - all elements in a list or all keys in a map: * `all`, `exists`, - `exists_one` - test a predicate expression against the inputs and - return `true` if the predicate is satisfied for all, any, or only one - value `list.all(x, x < 10)`. * `filter` - test a predicate expression - against the inputs and return the subset of elements which satisfy the - predicate: `payments.filter(p, p > 1000)`. * `map` - apply an - expression to all elements in the input and return the output aggregate - type: `[1, 2, 3].map(i, i * i)`. The `has(m.x)` macro tests whether the - property `x` is present in struct `m`. The semantics of this macro depend - on the type of `m`. For proto2 messages `has(m.x)` is defined as 'defined, - but not set`. For proto3, the macro tests whether the property is set to - its default. For map and struct types, the macro tests whether the property - `x` is defined on `m`. - """ - - # The name of the iteration variable. - iter_var: str = betterproto.string_field(1) - # The range over which var iterates. - iter_range: "Expr" = betterproto.message_field(2) - # The name of the variable used for accumulation of the result. - accu_var: str = betterproto.string_field(3) - # The initial value of the accumulator. - accu_init: "Expr" = betterproto.message_field(4) - # An expression which can contain iter_var and accu_var. Returns false when - # the result has been computed and may be used as a hint to short-circuit the - # remainder of the comprehension. - loop_condition: "Expr" = betterproto.message_field(5) - # An expression which can contain iter_var and accu_var. Computes the next - # value of accu_var. - loop_step: "Expr" = betterproto.message_field(6) - # An expression which can contain accu_var. Computes the result. - result: "Expr" = betterproto.message_field(7) - - -@dataclass(eq=False, repr=False) -class Constant(betterproto.Message): - """ - Represents a primitive literal. Named 'Constant' here for backwards - compatibility. This is similar as the primitives supported in the well- - known type `google.protobuf.Value`, but richer so it can represent CEL's - full range of primitives. Lists and structs are not included as constants - as these aggregate types may contain [Expr][google.api.expr.v1alpha1.Expr] - elements which require evaluation and are thus not constant. Examples of - literals include: `"hello"`, `b'bytes'`, `1u`, `4.2`, `-2`, `true`, `null`. - """ - - # null value. - null_value: "betterproto_lib_google_protobuf.NullValue" = betterproto.enum_field( - 1, group="constant_kind" - ) - # boolean value. - bool_value: bool = betterproto.bool_field(2, group="constant_kind") - # int64 value. - int64_value: int = betterproto.int64_field(3, group="constant_kind") - # uint64 value. - uint64_value: int = betterproto.uint64_field(4, group="constant_kind") - # double value. - double_value: float = betterproto.double_field(5, group="constant_kind") - # string value. 
- string_value: str = betterproto.string_field(6, group="constant_kind") - # bytes value. - bytes_value: bytes = betterproto.bytes_field(7, group="constant_kind") - # protobuf.Duration value. Deprecated: duration is no longer considered a - # builtin cel type. - duration_value: timedelta = betterproto.message_field(8, group="constant_kind") - # protobuf.Timestamp value. Deprecated: timestamp is no longer considered a - # builtin cel type. - timestamp_value: datetime = betterproto.message_field(9, group="constant_kind") - - def __post_init__(self) -> None: - super().__post_init__() - if self.duration_value: - warnings.warn("Constant.duration_value is deprecated", DeprecationWarning) - if self.timestamp_value: - warnings.warn("Constant.timestamp_value is deprecated", DeprecationWarning) - - -@dataclass(eq=False, repr=False) -class SourceInfo(betterproto.Message): - """Source information collected at parse time.""" - - # The syntax version of the source, e.g. `cel1`. - syntax_version: str = betterproto.string_field(1) - # The location name. All position information attached to an expression is - # relative to this location. The location could be a file, UI element, or - # similar. For example, `acme/app/AnvilPolicy.cel`. - location: str = betterproto.string_field(2) - # Monotonically increasing list of code point offsets where newlines `\n` - # appear. The line number of a given position is the index `i` where for a - # given `id` the `line_offsets[i] < id_positions[id] < line_offsets[i+1]`. - # The column may be derivd from `id_positions[id] - line_offsets[i]`. - line_offsets: List[int] = betterproto.int32_field(3) - # A map from the parse node id (e.g. `Expr.id`) to the code point offset - # within the source. - positions: Dict[int, int] = betterproto.map_field( - 4, betterproto.TYPE_INT64, betterproto.TYPE_INT32 - ) - # A map from the parse node id where a macro replacement was made to the call - # `Expr` that resulted in a macro expansion. For example, `has(value.field)` - # is a function call that is replaced by a `test_only` field selection in the - # AST. Likewise, the call `list.exists(e, e > 10)` translates to a - # comprehension expression. The key in the map corresponds to the expression - # id of the expanded macro, and the value is the call `Expr` that was - # replaced. - macro_calls: Dict[int, "Expr"] = betterproto.map_field( - 5, betterproto.TYPE_INT64, betterproto.TYPE_MESSAGE - ) - - -@dataclass(eq=False, repr=False) -class SourcePosition(betterproto.Message): - """A specific position in source.""" - - # The soucre location name (e.g. file name). - location: str = betterproto.string_field(1) - # The UTF-8 code unit offset. - offset: int = betterproto.int32_field(2) - # The 1-based index of the starting line in the source text where the issue - # occurs, or 0 if unknown. - line: int = betterproto.int32_field(3) - # The 0-based index of the starting position within the line of source text - # where the issue occurs. Only meaningful if line is nonzero. - column: int = betterproto.int32_field(4) - - -@dataclass(eq=False, repr=False) -class CheckedExpr(betterproto.Message): - """A CEL expression which has been successfully type checked.""" - - # A map from expression ids to resolved references. The following entries are - # in this table: - An Ident or Select expression is represented here if it - # resolves to a declaration. 
For instance, if `a.b.c` is represented by - # `select(select(id(a), b), c)`, and `a.b` resolves to a declaration, while - # `c` is a field selection, then the reference is attached to the nested - # select expression (but not to the id or or the outer select). In turn, if - # `a` resolves to a declaration and `b.c` are field selections, the - # reference is attached to the ident expression. - Every Call expression has - # an entry here, identifying the function being called. - Every - # CreateStruct expression for a message has an entry, identifying the - # message. - reference_map: Dict[int, "Reference"] = betterproto.map_field( - 2, betterproto.TYPE_INT64, betterproto.TYPE_MESSAGE - ) - # A map from expression ids to types. Every expression node which has a type - # different than DYN has a mapping here. If an expression has type DYN, it is - # omitted from this map to save space. - type_map: Dict[int, "Type"] = betterproto.map_field( - 3, betterproto.TYPE_INT64, betterproto.TYPE_MESSAGE - ) - # The source info derived from input that generated the parsed `expr` and any - # optimizations made during the type-checking pass. - source_info: "SourceInfo" = betterproto.message_field(5) - # The expr version indicates the major / minor version number of the `expr` - # representation. The most common reason for a version change will be to - # indicate to the CEL runtimes that transformations have been performed on - # the expr during static analysis. In some cases, this will save the runtime - # the work of applying the same or similar transformations prior to - # evaluation. - expr_version: str = betterproto.string_field(6) - # The checked expression. Semantically equivalent to the parsed `expr`, but - # may have structural differences. - expr: "Expr" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class Type(betterproto.Message): - """Represents a CEL type.""" - - # Dynamic type. - dyn: "betterproto_lib_google_protobuf.Empty" = betterproto.message_field( - 1, group="type_kind" - ) - # Null value. - null: "betterproto_lib_google_protobuf.NullValue" = betterproto.enum_field( - 2, group="type_kind" - ) - # Primitive types: `true`, `1u`, `-2.0`, `'string'`, `b'bytes'`. - primitive: "TypePrimitiveType" = betterproto.enum_field(3, group="type_kind") - # Wrapper of a primitive type, e.g. `google.protobuf.Int64Value`. - wrapper: "TypePrimitiveType" = betterproto.enum_field(4, group="type_kind") - # Well-known protobuf type such as `google.protobuf.Timestamp`. - well_known: "TypeWellKnownType" = betterproto.enum_field(5, group="type_kind") - # Parameterized list with elements of `list_type`, e.g. `list`. - list_type: "TypeListType" = betterproto.message_field(6, group="type_kind") - # Parameterized map with typed keys and values. - map_type: "TypeMapType" = betterproto.message_field(7, group="type_kind") - # Function type. - function: "TypeFunctionType" = betterproto.message_field(8, group="type_kind") - # Protocol buffer message type. The `message_type` string specifies the - # qualified message type name. For example, `google.plus.Profile`. - message_type: str = betterproto.string_field(9, group="type_kind") - # Type param type. The `type_param` string specifies the type parameter name, - # e.g. `list` would be a `list_type` whose element type was a `type_param` - # type named `E`. - type_param: str = betterproto.string_field(10, group="type_kind") - # Type type. The `type` value specifies the target type. e.g. int is type - # with a target type of `Primitive.INT`. 
- type: "Type" = betterproto.message_field(11, group="type_kind") - # Error type. During type-checking if an expression is an error, its type is - # propagated as the `ERROR` type. This permits the type-checker to discover - # other errors present in the expression. - error: "betterproto_lib_google_protobuf.Empty" = betterproto.message_field( - 12, group="type_kind" - ) - # Abstract, application defined type. - abstract_type: "TypeAbstractType" = betterproto.message_field(14, group="type_kind") - - -@dataclass(eq=False, repr=False) -class TypeListType(betterproto.Message): - """List type with typed elements, e.g. `list`.""" - - # The element type. - elem_type: "Type" = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class TypeMapType(betterproto.Message): - """ - Map type with parameterized key and value types, e.g. `map`. - """ - - # The type of the key. - key_type: "Type" = betterproto.message_field(1) - # The type of the value. - value_type: "Type" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class TypeFunctionType(betterproto.Message): - """Function type with result and arg types.""" - - # Result type of the function. - result_type: "Type" = betterproto.message_field(1) - # Argument types of the function. - arg_types: List["Type"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class TypeAbstractType(betterproto.Message): - """Application defined abstract type.""" - - # The fully qualified name of this abstract type. - name: str = betterproto.string_field(1) - # Parameter types for this abstract type. - parameter_types: List["Type"] = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Decl(betterproto.Message): - """ - Represents a declaration of a named value or function. A declaration is - part of the contract between the expression, the agent evaluating that - expression, and the caller requesting evaluation. - """ - - # The fully qualified name of the declaration. Declarations are organized in - # containers and this represents the full path to the declaration in its - # container, as in `google.api.expr.Decl`. Declarations used as [FunctionDecl - # .Overload][google.api.expr.v1alpha1.Decl.FunctionDecl.Overload] parameters - # may or may not have a name depending on whether the overload is function - # declaration or a function definition containing a result - # [Expr][google.api.expr.v1alpha1.Expr]. - name: str = betterproto.string_field(1) - # Identifier declaration. - ident: "DeclIdentDecl" = betterproto.message_field(2, group="decl_kind") - # Function declaration. - function: "DeclFunctionDecl" = betterproto.message_field(3, group="decl_kind") - - -@dataclass(eq=False, repr=False) -class DeclIdentDecl(betterproto.Message): - """ - Identifier declaration which specifies its type and optional `Expr` value. - An identifier without a value is a declaration that must be provided at - evaluation time. An identifier with a value should resolve to a constant, - but may be used in conjunction with other identifiers bound at evaluation - time. - """ - - # Required. The type of the identifier. - type: "Type" = betterproto.message_field(1) - # The constant value of the identifier. If not specified, the identifier must - # be supplied at evaluation time. - value: "Constant" = betterproto.message_field(2) - # Documentation string for the identifier. 
- doc: str = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class DeclFunctionDecl(betterproto.Message): - """ - Function declaration specifies one or more overloads which indicate the - function's parameter types and return type. Functions have no observable - side-effects (there may be side-effects like logging which are not - observable from CEL). - """ - - # Required. List of function overloads, must contain at least one overload. - overloads: List["DeclFunctionDeclOverload"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class DeclFunctionDeclOverload(betterproto.Message): - """ - An overload indicates a function's parameter types and return type, and may - optionally include a function body described in terms of - [Expr][google.api.expr.v1alpha1.Expr] values. Functions overloads are - declared in either a function or method call-style. For methods, the - `params[0]` is the expected type of the target receiver. Overloads must - have non-overlapping argument types after erasure of all parameterized type - variables (similar as type erasure in Java). - """ - - # Required. Globally unique overload name of the function which reflects the - # function name and argument types. This will be used by a - # [Reference][google.api.expr.v1alpha1.Reference] to indicate the - # `overload_id` that was resolved for the function `name`. - overload_id: str = betterproto.string_field(1) - # List of function parameter [Type][google.api.expr.v1alpha1.Type] values. - # Param types are disjoint after generic type parameters have been replaced - # with the type `DYN`. Since the `DYN` type is compatible with any other - # type, this means that if `A` is a type parameter, the function types - # `int` and `int` are not disjoint. Likewise, `map` - # is not disjoint from `map`. When the `result_type` of a function is a - # generic type param, the type param name also appears as the `type` of on at - # least one params. - params: List["Type"] = betterproto.message_field(2) - # The type param names associated with the function declaration. For example, - # `function ex(K key, map map) : V` would yield the type params of - # `K, V`. - type_params: List[str] = betterproto.string_field(3) - # Required. The result type of the function. For example, the operator - # `string.isEmpty()` would have `result_type` of `kind: BOOL`. - result_type: "Type" = betterproto.message_field(4) - # Whether the function is to be used in a method call-style `x.f(...)` of a - # function call-style `f(x, ...)`. For methods, the first parameter - # declaration, `params[0]` is the expected type of the target receiver. - is_instance_function: bool = betterproto.bool_field(5) - # Documentation string for the overload. - doc: str = betterproto.string_field(6) - - -@dataclass(eq=False, repr=False) -class Reference(betterproto.Message): - """Describes a resolved reference to a declaration.""" - - # The fully qualified name of the declaration. - name: str = betterproto.string_field(1) - # For references to functions, this is a list of `Overload.overload_id` - # values which match according to typing rules. If the list has more than one - # element, overload resolution among the presented candidates must happen at - # runtime because of dynamic types. The type checker attempts to narrow down - # this list as much as possible. Empty if this is not a reference to a - # [Decl.FunctionDecl][google.api.expr.v1alpha1.Decl.FunctionDecl]. 
- overload_id: List[str] = betterproto.string_field(3) - # For references to constants, this may contain the value of the constant if - # known at compile time. - value: "Constant" = betterproto.message_field(4) - - -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/google/rpc/__init__.py b/src/envoy_data_plane/google/rpc/__init__.py deleted file mode 100644 index 7162a1d..0000000 --- a/src/envoy_data_plane/google/rpc/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: google/rpc/status.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Status(betterproto.Message): - """ - The `Status` type defines a logical error model that is suitable for - different programming environments, including REST APIs and RPC APIs. It is - used by [gRPC](https://github.com/grpc). Each `Status` message contains - three pieces of data: error code, error message, and error details. You can - find out more about this error model and how to work with it in the [API - Design Guide](https://cloud.google.com/apis/design/errors). - """ - - # The status code, which should be an enum value of - # [google.rpc.Code][google.rpc.Code]. - code: int = betterproto.int32_field(1) - # A developer-facing error message, which should be in English. Any user- - # facing error message should be localized and sent in the - # [google.rpc.Status.details][google.rpc.Status.details] field, or localized - # by the client. - message: str = betterproto.string_field(2) - # A list of messages that carry the error details. There is a common set of - # message types for APIs to use. - details: List["betterproto_lib_google_protobuf.Any"] = betterproto.message_field(3) - - -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/io/__init__.py b/src/envoy_data_plane/io/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/io/prometheus/__init__.py b/src/envoy_data_plane/io/prometheus/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/io/prometheus/client/__init__.py b/src/envoy_data_plane/io/prometheus/client/__init__.py deleted file mode 100644 index 3bb8d5e..0000000 --- a/src/envoy_data_plane/io/prometheus/client/__init__.py +++ /dev/null @@ -1,92 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: io/prometheus/client/metrics.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import datetime -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class MetricType(betterproto.Enum): - COUNTER = 0 - GAUGE = 1 - SUMMARY = 2 - UNTYPED = 3 - HISTOGRAM = 4 - - -@dataclass(eq=False, repr=False) -class LabelPair(betterproto.Message): - name: str = betterproto.string_field(1) - value: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class Gauge(betterproto.Message): - value: float = betterproto.double_field(1) - - -@dataclass(eq=False, repr=False) -class Counter(betterproto.Message): - value: float = betterproto.double_field(1) - exemplar: "Exemplar" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class Quantile(betterproto.Message): - quantile: float = betterproto.double_field(1) - value: float = betterproto.double_field(2) - - -@dataclass(eq=False, repr=False) -class Summary(betterproto.Message): - sample_count: int = betterproto.uint64_field(1) - sample_sum: float = betterproto.double_field(2) - quantile: List["Quantile"] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class Untyped(betterproto.Message): - value: float = betterproto.double_field(1) - - -@dataclass(eq=False, repr=False) -class Histogram(betterproto.Message): - sample_count: int = betterproto.uint64_field(1) - sample_sum: float = betterproto.double_field(2) - bucket: List["Bucket"] = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class Bucket(betterproto.Message): - cumulative_count: int = betterproto.uint64_field(1) - upper_bound: float = betterproto.double_field(2) - exemplar: "Exemplar" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class Exemplar(betterproto.Message): - label: List["LabelPair"] = betterproto.message_field(1) - value: float = betterproto.double_field(2) - timestamp: datetime = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class Metric(betterproto.Message): - label: List["LabelPair"] = betterproto.message_field(1) - gauge: "Gauge" = betterproto.message_field(2) - counter: "Counter" = betterproto.message_field(3) - summary: "Summary" = betterproto.message_field(4) - untyped: "Untyped" = betterproto.message_field(5) - histogram: "Histogram" = betterproto.message_field(7) - timestamp_ms: int = betterproto.int64_field(6) - - -@dataclass(eq=False, repr=False) -class MetricFamily(betterproto.Message): - name: str = betterproto.string_field(1) - help: str = betterproto.string_field(2) - type: "MetricType" = betterproto.enum_field(3) - metric: List["Metric"] = betterproto.message_field(4) diff --git a/src/envoy_data_plane/opencensus/__init__.py b/src/envoy_data_plane/opencensus/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/opencensus/proto/__init__.py b/src/envoy_data_plane/opencensus/proto/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/opencensus/proto/resource/__init__.py b/src/envoy_data_plane/opencensus/proto/resource/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/opencensus/proto/resource/v1/__init__.py b/src/envoy_data_plane/opencensus/proto/resource/v1/__init__.py deleted file mode 100644 index f30ec3b..0000000 --- a/src/envoy_data_plane/opencensus/proto/resource/v1/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Generated by the protocol 
buffer compiler. DO NOT EDIT! -# sources: opencensus/proto/resource/v1/resource.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Dict - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class Resource(betterproto.Message): - """Resource information.""" - - # Type identifier for the resource. - type: str = betterproto.string_field(1) - # Set of labels that describe the resource. - labels: Dict[str, str] = betterproto.map_field( - 2, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) diff --git a/src/envoy_data_plane/opencensus/proto/trace/__init__.py b/src/envoy_data_plane/opencensus/proto/trace/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/opencensus/proto/trace/v1/__init__.py b/src/envoy_data_plane/opencensus/proto/trace/v1/__init__.py deleted file mode 100644 index 1dc9736..0000000 --- a/src/envoy_data_plane/opencensus/proto/trace/v1/__init__.py +++ /dev/null @@ -1,424 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: opencensus/proto/trace/v1/trace.proto, opencensus/proto/trace/v1/trace_config.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import datetime -from typing import Dict, List, Optional - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class ConstantSamplerConstantDecision(betterproto.Enum): - ALWAYS_OFF = 0 - ALWAYS_ON = 1 - ALWAYS_PARENT = 2 - - -class SpanSpanKind(betterproto.Enum): - SPAN_KIND_UNSPECIFIED = 0 - SERVER = 1 - CLIENT = 2 - - -class SpanTimeEventMessageEventType(betterproto.Enum): - TYPE_UNSPECIFIED = 0 - SENT = 1 - RECEIVED = 2 - - -class SpanLinkType(betterproto.Enum): - TYPE_UNSPECIFIED = 0 - CHILD_LINKED_SPAN = 1 - PARENT_LINKED_SPAN = 2 - - -@dataclass(eq=False, repr=False) -class TraceConfig(betterproto.Message): - """ - Global configuration of the trace service. All fields must be specified, or - the default (zero) values will be used for each type. - """ - - probability_sampler: "ProbabilitySampler" = betterproto.message_field( - 1, group="sampler" - ) - constant_sampler: "ConstantSampler" = betterproto.message_field(2, group="sampler") - rate_limiting_sampler: "RateLimitingSampler" = betterproto.message_field( - 3, group="sampler" - ) - # The global default max number of attributes per span. - max_number_of_attributes: int = betterproto.int64_field(4) - # The global default max number of annotation events per span. - max_number_of_annotations: int = betterproto.int64_field(5) - # The global default max number of message events per span. - max_number_of_message_events: int = betterproto.int64_field(6) - # The global default max number of link entries per span. - max_number_of_links: int = betterproto.int64_field(7) - - -@dataclass(eq=False, repr=False) -class ProbabilitySampler(betterproto.Message): - """ - Sampler that tries to uniformly sample traces with a given probability. The - probability of sampling a trace is equal to that of the specified - probability. - """ - - # The desired probability of sampling. Must be within [0.0, 1.0]. 
- sampling_probability: float = betterproto.double_field(1) - - -@dataclass(eq=False, repr=False) -class ConstantSampler(betterproto.Message): - """Sampler that always makes a constant decision on span sampling.""" - - decision: "ConstantSamplerConstantDecision" = betterproto.enum_field(1) - - -@dataclass(eq=False, repr=False) -class RateLimitingSampler(betterproto.Message): - """Sampler that tries to sample with a rate per time window.""" - - # Rate per second. - qps: int = betterproto.int64_field(1) - - -@dataclass(eq=False, repr=False) -class Span(betterproto.Message): - """ - A span represents a single operation within a trace. Spans can be nested to - form a trace tree. Spans may also be linked to other spans from the same or - different trace. And form graphs. Often, a trace contains a root span that - describes the end-to-end latency, and one or more subspans for its sub- - operations. A trace can also contain multiple root spans, or none at all. - Spans do not need to be contiguous - there may be gaps or overlaps between - spans in a trace. The next id is 17. TODO(bdrutu): Add an example. - """ - - # A unique identifier for a trace. All spans from the same trace share the - # same `trace_id`. The ID is a 16-byte array. An ID with all zeroes is - # considered invalid. This field is semantically required. Receiver should - # generate new random trace_id if empty or invalid trace_id was received. - # This field is required. - trace_id: bytes = betterproto.bytes_field(1) - # A unique identifier for a span within a trace, assigned when the span is - # created. The ID is an 8-byte array. An ID with all zeroes is considered - # invalid. This field is semantically required. Receiver should generate new - # random span_id if empty or invalid span_id was received. This field is - # required. - span_id: bytes = betterproto.bytes_field(2) - # The Tracestate on the span. - tracestate: "SpanTracestate" = betterproto.message_field(15) - # The `span_id` of this span's parent span. If this is a root span, then this - # field must be empty. The ID is an 8-byte array. - parent_span_id: bytes = betterproto.bytes_field(3) - # A description of the span's operation. For example, the name can be a - # qualified method name or a file name and a line number where the operation - # is called. A best practice is to use the same display name at the same call - # point in an application. This makes it easier to correlate spans in - # different traces. This field is semantically required to be set to non- - # empty string. When null or empty string received - receiver may use string - # "name" as a replacement. There might be smarted algorithms implemented by - # receiver to fix the empty span name. This field is required. - name: "TruncatableString" = betterproto.message_field(4) - # Distinguishes between spans generated in a particular context. For example, - # two spans with the same name may be distinguished using `CLIENT` (caller) - # and `SERVER` (callee) to identify queueing latency associated with the - # span. - kind: "SpanSpanKind" = betterproto.enum_field(14) - # The start time of the span. On the client side, this is the time kept by - # the local machine where the span execution starts. On the server side, this - # is the time when the server's application handler starts running. This - # field is semantically required. When not set on receive - receiver should - # set it to the value of end_time field if it was set. Or to the current time - # if neither was set. 
It is important to keep end_time > start_time for - # consistency. This field is required. - start_time: datetime = betterproto.message_field(5) - # The end time of the span. On the client side, this is the time kept by the - # local machine where the span execution ends. On the server side, this is - # the time when the server application handler stops running. This field is - # semantically required. When not set on receive - receiver should set it to - # start_time value. It is important to keep end_time > start_time for - # consistency. This field is required. - end_time: datetime = betterproto.message_field(6) - # A set of attributes on the span. - attributes: "SpanAttributes" = betterproto.message_field(7) - # A stack trace captured at the start of the span. - stack_trace: "StackTrace" = betterproto.message_field(8) - # The included time events. - time_events: "SpanTimeEvents" = betterproto.message_field(9) - # The included links. - links: "SpanLinks" = betterproto.message_field(10) - # An optional final status for this span. Semantically when Status wasn't set - # it is means span ended without errors and assume Status.Ok (code = 0). - status: "Status" = betterproto.message_field(11) - # An optional resource that is associated with this span. If not set, this - # span should be part of a batch that does include the resource information, - # unless resource information is unknown. - resource: "__resource_v1__.Resource" = betterproto.message_field(16) - # A highly recommended but not required flag that identifies when a trace - # crosses a process boundary. True when the parent_span belongs to the same - # process as the current span. This flag is most commonly used to indicate - # the need to adjust time as clocks in different processes may not be - # synchronized. - same_process_as_parent_span: Optional[bool] = betterproto.message_field( - 12, wraps=betterproto.TYPE_BOOL - ) - # An optional number of child spans that were generated while this span was - # active. If set, allows an implementation to detect missing child spans. - child_span_count: Optional[int] = betterproto.message_field( - 13, wraps=betterproto.TYPE_UINT32 - ) - - -@dataclass(eq=False, repr=False) -class SpanTracestate(betterproto.Message): - """ - This field conveys information about request position in multiple - distributed tracing graphs. It is a list of Tracestate.Entry with a maximum - of 32 members in the list. See the https://github.com/w3c/distributed- - tracing for more details about this field. - """ - - # A list of entries that represent the Tracestate. - entries: List["SpanTracestateEntry"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class SpanTracestateEntry(betterproto.Message): - # The key must begin with a lowercase letter, and can only contain lowercase - # letters 'a'-'z', digits '0'-'9', underscores '_', dashes '-', asterisks - # '*', and forward slashes '/'. - key: str = betterproto.string_field(1) - # The value is opaque string up to 256 characters printable ASCII RFC0020 - # characters (i.e., the range 0x20 to 0x7E) except ',' and '='. Note that - # this also excludes tabs, newlines, carriage returns, etc. - value: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class SpanAttributes(betterproto.Message): - """A set of attributes, each with a key and a value.""" - - # The set of attributes. The value can be a string, an integer, a double or - # the Boolean values `true` or `false`. 
Note, global attributes like server - # name can be set as tags using resource API. Examples of attributes: - # "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) - # AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - # "/http/server_latency": 300 "abc.com/myattribute": true - # "abc.com/score": 10.239 - attribute_map: Dict[str, "AttributeValue"] = betterproto.map_field( - 1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - # The number of attributes that were discarded. Attributes can be discarded - # because their keys are too long or because there are too many attributes. - # If this value is 0, then no attributes were dropped. - dropped_attributes_count: int = betterproto.int32_field(2) - - -@dataclass(eq=False, repr=False) -class SpanTimeEvent(betterproto.Message): - """A time-stamped annotation or message event in the Span.""" - - # The time the event occurred. - time: datetime = betterproto.message_field(1) - # A text annotation with a set of attributes. - annotation: "SpanTimeEventAnnotation" = betterproto.message_field(2, group="value") - # An event describing a message sent/received between Spans. - message_event: "SpanTimeEventMessageEvent" = betterproto.message_field( - 3, group="value" - ) - - -@dataclass(eq=False, repr=False) -class SpanTimeEventAnnotation(betterproto.Message): - """A text annotation with a set of attributes.""" - - # A user-supplied message describing the event. - description: "TruncatableString" = betterproto.message_field(1) - # A set of attributes on the annotation. - attributes: "SpanAttributes" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class SpanTimeEventMessageEvent(betterproto.Message): - """An event describing a message sent/received between Spans.""" - - # The type of MessageEvent. Indicates whether the message was sent or - # received. - type: "SpanTimeEventMessageEventType" = betterproto.enum_field(1) - # An identifier for the MessageEvent's message that can be used to match SENT - # and RECEIVED MessageEvents. For example, this field could represent a - # sequence ID for a streaming RPC. It is recommended to be unique within a - # Span. - id: int = betterproto.uint64_field(2) - # The number of uncompressed bytes sent or received. - uncompressed_size: int = betterproto.uint64_field(3) - # The number of compressed bytes sent or received. If zero, assumed to be the - # same size as uncompressed. - compressed_size: int = betterproto.uint64_field(4) - - -@dataclass(eq=False, repr=False) -class SpanTimeEvents(betterproto.Message): - """ - A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation on - the span, consisting of either user-supplied key-value pairs, or details of - a message sent/received between Spans. - """ - - # A collection of `TimeEvent`s. - time_event: List["SpanTimeEvent"] = betterproto.message_field(1) - # The number of dropped annotations in all the included time events. If the - # value is 0, then no annotations were dropped. - dropped_annotations_count: int = betterproto.int32_field(2) - # The number of dropped message events in all the included time events. If - # the value is 0, then no message events were dropped. - dropped_message_events_count: int = betterproto.int32_field(3) - - -@dataclass(eq=False, repr=False) -class SpanLink(betterproto.Message): - """ - A pointer from the current span to another span in the same trace or in a - different trace. 
For example, this can be used in batching operations, - where a single batch handler processes multiple requests from different - traces or when the handler receives a request from a different project. - """ - - # A unique identifier of a trace that this linked span is part of. The ID is - # a 16-byte array. - trace_id: bytes = betterproto.bytes_field(1) - # A unique identifier for the linked span. The ID is an 8-byte array. - span_id: bytes = betterproto.bytes_field(2) - # The relationship of the current span relative to the linked span. - type: "SpanLinkType" = betterproto.enum_field(3) - # A set of attributes on the link. - attributes: "SpanAttributes" = betterproto.message_field(4) - - -@dataclass(eq=False, repr=False) -class SpanLinks(betterproto.Message): - """ - A collection of links, which are references from this span to a span in the - same or different trace. - """ - - # A collection of links. - link: List["SpanLink"] = betterproto.message_field(1) - # The number of dropped links after the maximum size was enforced. If this - # value is 0, then no links were dropped. - dropped_links_count: int = betterproto.int32_field(2) - - -@dataclass(eq=False, repr=False) -class Status(betterproto.Message): - """ - The `Status` type defines a logical error model that is suitable for - different programming environments, including REST APIs and RPC APIs. This - proto's fields are a subset of those of [google.rpc.Status](https://github. - com/googleapis/googleapis/blob/master/google/rpc/status.proto), which is - used by [gRPC](https://github.com/grpc). - """ - - # The status code. This is optional field. It is safe to assume 0 (OK) when - # not set. - code: int = betterproto.int32_field(1) - # A developer-facing error message, which should be in English. - message: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class AttributeValue(betterproto.Message): - """The value of an Attribute.""" - - # A string up to 256 bytes long. - string_value: "TruncatableString" = betterproto.message_field(1, group="value") - # A 64-bit signed integer. - int_value: int = betterproto.int64_field(2, group="value") - # A Boolean value represented by `true` or `false`. - bool_value: bool = betterproto.bool_field(3, group="value") - # A double value. - double_value: float = betterproto.double_field(4, group="value") - - -@dataclass(eq=False, repr=False) -class StackTrace(betterproto.Message): - """The call stack which originated this span.""" - - # Stack frames in this stack trace. - stack_frames: "StackTraceStackFrames" = betterproto.message_field(1) - # The hash ID is used to conserve network bandwidth for duplicate stack - # traces within a single trace. Often multiple spans will have identical - # stack traces. The first occurrence of a stack trace should contain both - # `stack_frames` and a value in `stack_trace_hash_id`. Subsequent spans - # within the same request can refer to that stack trace by setting only - # `stack_trace_hash_id`. TODO: describe how to deal with the case where - # stack_trace_hash_id is zero because it was not set. - stack_trace_hash_id: int = betterproto.uint64_field(2) - - -@dataclass(eq=False, repr=False) -class StackTraceStackFrame(betterproto.Message): - """A single stack frame in a stack trace.""" - - # The fully-qualified name that uniquely identifies the function or method - # that is active in this frame. 
- function_name: "TruncatableString" = betterproto.message_field(1) - # An un-mangled function name, if `function_name` is - # [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can be - # fully qualified. - original_function_name: "TruncatableString" = betterproto.message_field(2) - # The name of the source file where the function call appears. - file_name: "TruncatableString" = betterproto.message_field(3) - # The line number in `file_name` where the function call appears. - line_number: int = betterproto.int64_field(4) - # The column number where the function call appears, if available. This is - # important in JavaScript because of its anonymous functions. - column_number: int = betterproto.int64_field(5) - # The binary module from where the code was loaded. - load_module: "Module" = betterproto.message_field(6) - # The version of the deployed source code. - source_version: "TruncatableString" = betterproto.message_field(7) - - -@dataclass(eq=False, repr=False) -class StackTraceStackFrames(betterproto.Message): - """A collection of stack frames, which can be truncated.""" - - # Stack frames in this call stack. - frame: List["StackTraceStackFrame"] = betterproto.message_field(1) - # The number of stack frames that were dropped because there were too many - # stack frames. If this value is 0, then no stack frames were dropped. - dropped_frames_count: int = betterproto.int32_field(2) - - -@dataclass(eq=False, repr=False) -class Module(betterproto.Message): - """A description of a binary module.""" - - # TODO: document the meaning of this field. For example: main binary, kernel - # modules, and dynamic libraries such as libc.so, sharedlib.so. - module: "TruncatableString" = betterproto.message_field(1) - # A unique identifier for the module, usually a hash of its contents. - build_id: "TruncatableString" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class TruncatableString(betterproto.Message): - """A string that might be shortened to a specified length.""" - - # The shortened string. For example, if the original string was 500 bytes - # long and the limit of the string was 128 bytes, then this value contains - # the first 128 bytes of the 500-byte string. Note that truncation always - # happens on a character boundary, to ensure that a truncated string is still - # valid UTF-8. Because it may contain multi-byte characters, the size of the - # truncated string may be less than the truncation limit. - value: str = betterproto.string_field(1) - # The number of bytes removed from the original string. If this value is 0, - # then the string was not shortened. 
- truncated_byte_count: int = betterproto.int32_field(2) - - -from ...resource import v1 as __resource_v1__ diff --git a/src/envoy_data_plane/opentelemetry/__init__.py b/src/envoy_data_plane/opentelemetry/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/opentelemetry/proto/__init__.py b/src/envoy_data_plane/opentelemetry/proto/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/opentelemetry/proto/common/__init__.py b/src/envoy_data_plane/opentelemetry/proto/common/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/opentelemetry/proto/common/v1/__init__.py b/src/envoy_data_plane/opentelemetry/proto/common/v1/__init__.py deleted file mode 100644 index 0e6d520..0000000 --- a/src/envoy_data_plane/opentelemetry/proto/common/v1/__init__.py +++ /dev/null @@ -1,89 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: opentelemetry/proto/common/v1/common.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class AnyValue(betterproto.Message): - """ - AnyValue is used to represent any type of attribute value. AnyValue may - contain a primitive value such as a string or integer or it may contain an - arbitrary nested object containing arrays, key-value lists and primitives. - """ - - string_value: str = betterproto.string_field(1, group="value") - bool_value: bool = betterproto.bool_field(2, group="value") - int_value: int = betterproto.int64_field(3, group="value") - double_value: float = betterproto.double_field(4, group="value") - array_value: "ArrayValue" = betterproto.message_field(5, group="value") - kvlist_value: "KeyValueList" = betterproto.message_field(6, group="value") - bytes_value: bytes = betterproto.bytes_field(7, group="value") - - -@dataclass(eq=False, repr=False) -class ArrayValue(betterproto.Message): - """ - ArrayValue is a list of AnyValue messages. We need ArrayValue as a message - since oneof in AnyValue does not allow repeated fields. - """ - - # Array of values. The array may be empty (contain 0 elements). - values: List["AnyValue"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class KeyValueList(betterproto.Message): - """ - KeyValueList is a list of KeyValue messages. We need KeyValueList as a - message since `oneof` in AnyValue does not allow repeated fields. - Everywhere else where we need a list of KeyValue messages (e.g. in Span) we - use `repeated KeyValue` directly to avoid unnecessary extra wrapping (which - slows down the protocol). The 2 approaches are semantically equivalent. - """ - - # A collection of key/value pairs of key-value pairs. The list may be empty - # (may contain 0 elements). - values: List["KeyValue"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class KeyValue(betterproto.Message): - """ - KeyValue is a key-value pair that is used to store Span attributes, Link - attributes, etc. - """ - - key: str = betterproto.string_field(1) - value: "AnyValue" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class StringKeyValue(betterproto.Message): - """ - StringKeyValue is a pair of key/value strings. This is the simpler (and - faster) version of KeyValue that only supports string values. 
- """ - - key: str = betterproto.string_field(1) - value: str = betterproto.string_field(2) - - def __post_init__(self) -> None: - warnings.warn("StringKeyValue is deprecated", DeprecationWarning) - super().__post_init__() - - -@dataclass(eq=False, repr=False) -class InstrumentationLibrary(betterproto.Message): - """ - InstrumentationLibrary is a message representing the instrumentation - library information such as the fully qualified name and version. - """ - - # An empty instrumentation library name means the name is unknown. - name: str = betterproto.string_field(1) - version: str = betterproto.string_field(2) diff --git a/src/envoy_data_plane/udpa/__init__.py b/src/envoy_data_plane/udpa/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/udpa/annotations/__init__.py b/src/envoy_data_plane/udpa/annotations/__init__.py deleted file mode 100644 index bfd543b..0000000 --- a/src/envoy_data_plane/udpa/annotations/__init__.py +++ /dev/null @@ -1,71 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: udpa/annotations/migrate.proto, udpa/annotations/security.proto, udpa/annotations/sensitive.proto, udpa/annotations/status.proto, udpa/annotations/versioning.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class PackageVersionStatus(betterproto.Enum): - # Unknown package version status. - UNKNOWN = 0 - # This version of the package is frozen. - FROZEN = 1 - # This version of the package is the active development version. - ACTIVE = 2 - # This version of the package is the candidate for the next major version. It - # is typically machine generated from the active development version. - NEXT_MAJOR_VERSION_CANDIDATE = 3 - - -@dataclass(eq=False, repr=False) -class MigrateAnnotation(betterproto.Message): - # Rename the message/enum/enum value in next version. - rename: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class FieldMigrateAnnotation(betterproto.Message): - # Rename the field in next version. - rename: str = betterproto.string_field(1) - # Add the field to a named oneof in next version. If this already exists, the - # field will join its siblings under the oneof, otherwise a new oneof will be - # created with the given name. - oneof_promotion: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class FileMigrateAnnotation(betterproto.Message): - # Move all types in the file to another package, this implies changing proto - # file path. - move_to_package: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class StatusAnnotation(betterproto.Message): - # The entity is work-in-progress and subject to breaking changes. - work_in_progress: bool = betterproto.bool_field(1) - # The entity belongs to a package with the given version status. - package_version_status: "PackageVersionStatus" = betterproto.enum_field(2) - - -@dataclass(eq=False, repr=False) -class VersioningAnnotation(betterproto.Message): - # Track the previous message type. E.g. this message might be - # udpa.foo.v3alpha.Foo and it was previously udpa.bar.v2.Bar. This - # information is consumed by UDPA via proto descriptors. - previous_message_type: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class FieldSecurityAnnotation(betterproto.Message): - """ - These annotations indicate metadata for the purpose of understanding the - security significance of fields. 
- """ - - # Field should be set in the presence of untrusted downstreams. - configure_for_untrusted_downstream: bool = betterproto.bool_field(1) - # Field should be set in the presence of untrusted upstreams. - configure_for_untrusted_upstream: bool = betterproto.bool_field(2) diff --git a/src/envoy_data_plane/validate/__init__.py b/src/envoy_data_plane/validate/__init__.py deleted file mode 100644 index 6b4664f..0000000 --- a/src/envoy_data_plane/validate/__init__.py +++ /dev/null @@ -1,711 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: validate/validate.proto -# plugin: python-betterproto -from dataclasses import dataclass -from datetime import datetime, timedelta -from typing import List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class KnownRegex(betterproto.Enum): - """WellKnownRegex contain some well-known patterns.""" - - UNKNOWN = 0 - # HTTP header name as defined by RFC 7230. - HTTP_HEADER_NAME = 1 - # HTTP header value as defined by RFC 7230. - HTTP_HEADER_VALUE = 2 - - -@dataclass(eq=False, repr=False) -class FieldRules(betterproto.Message): - """ - FieldRules encapsulates the rules for each type of field. Depending on the - field, the correct set should be used to ensure proper validations. - """ - - message: "MessageRules" = betterproto.message_field(17) - # Scalar Field Types - float: "FloatRules" = betterproto.message_field(1, group="type") - double: "DoubleRules" = betterproto.message_field(2, group="type") - int32: "Int32Rules" = betterproto.message_field(3, group="type") - int64: "Int64Rules" = betterproto.message_field(4, group="type") - uint32: "UInt32Rules" = betterproto.message_field(5, group="type") - uint64: "UInt64Rules" = betterproto.message_field(6, group="type") - sint32: "SInt32Rules" = betterproto.message_field(7, group="type") - sint64: "SInt64Rules" = betterproto.message_field(8, group="type") - fixed32: "Fixed32Rules" = betterproto.message_field(9, group="type") - fixed64: "Fixed64Rules" = betterproto.message_field(10, group="type") - sfixed32: "SFixed32Rules" = betterproto.message_field(11, group="type") - sfixed64: "SFixed64Rules" = betterproto.message_field(12, group="type") - bool: "BoolRules" = betterproto.message_field(13, group="type") - string: "StringRules" = betterproto.message_field(14, group="type") - bytes: "BytesRules" = betterproto.message_field(15, group="type") - # Complex Field Types - enum: "EnumRules" = betterproto.message_field(16, group="type") - repeated: "RepeatedRules" = betterproto.message_field(18, group="type") - map: "MapRules" = betterproto.message_field(19, group="type") - # Well-Known Field Types - any: "AnyRules" = betterproto.message_field(20, group="type") - duration: "DurationRules" = betterproto.message_field(21, group="type") - timestamp: "TimestampRules" = betterproto.message_field(22, group="type") - - -@dataclass(eq=False, repr=False) -class FloatRules(betterproto.Message): - """FloatRules describes the constraints applied to `float` values""" - - # Const specifies that this field must be exactly the specified value - const: float = betterproto.float_field(1) - # Lt specifies that this field must be less than the specified value, - # exclusive - lt: float = betterproto.float_field(2) - # Lte specifies that this field must be less than or equal to the specified - # value, inclusive - lte: float = betterproto.float_field(3) - # Gt specifies that this field must be greater than the specified value, - # exclusive. 
If the value of Gt is larger than a specified Lt or Lte, the - # range is reversed. - gt: float = betterproto.float_field(4) - # Gte specifies that this field must be greater than or equal to the - # specified value, inclusive. If the value of Gte is larger than a specified - # Lt or Lte, the range is reversed. - gte: float = betterproto.float_field(5) - # In specifies that this field must be equal to one of the specified values - in_: List[float] = betterproto.float_field(6) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[float] = betterproto.float_field(7) - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(8) - - -@dataclass(eq=False, repr=False) -class DoubleRules(betterproto.Message): - """DoubleRules describes the constraints applied to `double` values""" - - # Const specifies that this field must be exactly the specified value - const: float = betterproto.double_field(1) - # Lt specifies that this field must be less than the specified value, - # exclusive - lt: float = betterproto.double_field(2) - # Lte specifies that this field must be less than or equal to the specified - # value, inclusive - lte: float = betterproto.double_field(3) - # Gt specifies that this field must be greater than the specified value, - # exclusive. If the value of Gt is larger than a specified Lt or Lte, the - # range is reversed. - gt: float = betterproto.double_field(4) - # Gte specifies that this field must be greater than or equal to the - # specified value, inclusive. If the value of Gte is larger than a specified - # Lt or Lte, the range is reversed. - gte: float = betterproto.double_field(5) - # In specifies that this field must be equal to one of the specified values - in_: List[float] = betterproto.double_field(6) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[float] = betterproto.double_field(7) - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(8) - - -@dataclass(eq=False, repr=False) -class Int32Rules(betterproto.Message): - """Int32Rules describes the constraints applied to `int32` values""" - - # Const specifies that this field must be exactly the specified value - const: int = betterproto.int32_field(1) - # Lt specifies that this field must be less than the specified value, - # exclusive - lt: int = betterproto.int32_field(2) - # Lte specifies that this field must be less than or equal to the specified - # value, inclusive - lte: int = betterproto.int32_field(3) - # Gt specifies that this field must be greater than the specified value, - # exclusive. If the value of Gt is larger than a specified Lt or Lte, the - # range is reversed. - gt: int = betterproto.int32_field(4) - # Gte specifies that this field must be greater than or equal to the - # specified value, inclusive. If the value of Gte is larger than a specified - # Lt or Lte, the range is reversed. 
- gte: int = betterproto.int32_field(5) - # In specifies that this field must be equal to one of the specified values - in_: List[int] = betterproto.int32_field(6) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[int] = betterproto.int32_field(7) - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(8) - - -@dataclass(eq=False, repr=False) -class Int64Rules(betterproto.Message): - """Int64Rules describes the constraints applied to `int64` values""" - - # Const specifies that this field must be exactly the specified value - const: int = betterproto.int64_field(1) - # Lt specifies that this field must be less than the specified value, - # exclusive - lt: int = betterproto.int64_field(2) - # Lte specifies that this field must be less than or equal to the specified - # value, inclusive - lte: int = betterproto.int64_field(3) - # Gt specifies that this field must be greater than the specified value, - # exclusive. If the value of Gt is larger than a specified Lt or Lte, the - # range is reversed. - gt: int = betterproto.int64_field(4) - # Gte specifies that this field must be greater than or equal to the - # specified value, inclusive. If the value of Gte is larger than a specified - # Lt or Lte, the range is reversed. - gte: int = betterproto.int64_field(5) - # In specifies that this field must be equal to one of the specified values - in_: List[int] = betterproto.int64_field(6) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[int] = betterproto.int64_field(7) - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(8) - - -@dataclass(eq=False, repr=False) -class UInt32Rules(betterproto.Message): - """UInt32Rules describes the constraints applied to `uint32` values""" - - # Const specifies that this field must be exactly the specified value - const: int = betterproto.uint32_field(1) - # Lt specifies that this field must be less than the specified value, - # exclusive - lt: int = betterproto.uint32_field(2) - # Lte specifies that this field must be less than or equal to the specified - # value, inclusive - lte: int = betterproto.uint32_field(3) - # Gt specifies that this field must be greater than the specified value, - # exclusive. If the value of Gt is larger than a specified Lt or Lte, the - # range is reversed. - gt: int = betterproto.uint32_field(4) - # Gte specifies that this field must be greater than or equal to the - # specified value, inclusive. If the value of Gte is larger than a specified - # Lt or Lte, the range is reversed. 
- gte: int = betterproto.uint32_field(5) - # In specifies that this field must be equal to one of the specified values - in_: List[int] = betterproto.uint32_field(6) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[int] = betterproto.uint32_field(7) - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(8) - - -@dataclass(eq=False, repr=False) -class UInt64Rules(betterproto.Message): - """UInt64Rules describes the constraints applied to `uint64` values""" - - # Const specifies that this field must be exactly the specified value - const: int = betterproto.uint64_field(1) - # Lt specifies that this field must be less than the specified value, - # exclusive - lt: int = betterproto.uint64_field(2) - # Lte specifies that this field must be less than or equal to the specified - # value, inclusive - lte: int = betterproto.uint64_field(3) - # Gt specifies that this field must be greater than the specified value, - # exclusive. If the value of Gt is larger than a specified Lt or Lte, the - # range is reversed. - gt: int = betterproto.uint64_field(4) - # Gte specifies that this field must be greater than or equal to the - # specified value, inclusive. If the value of Gte is larger than a specified - # Lt or Lte, the range is reversed. - gte: int = betterproto.uint64_field(5) - # In specifies that this field must be equal to one of the specified values - in_: List[int] = betterproto.uint64_field(6) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[int] = betterproto.uint64_field(7) - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(8) - - -@dataclass(eq=False, repr=False) -class SInt32Rules(betterproto.Message): - """SInt32Rules describes the constraints applied to `sint32` values""" - - # Const specifies that this field must be exactly the specified value - const: int = betterproto.sint32_field(1) - # Lt specifies that this field must be less than the specified value, - # exclusive - lt: int = betterproto.sint32_field(2) - # Lte specifies that this field must be less than or equal to the specified - # value, inclusive - lte: int = betterproto.sint32_field(3) - # Gt specifies that this field must be greater than the specified value, - # exclusive. If the value of Gt is larger than a specified Lt or Lte, the - # range is reversed. - gt: int = betterproto.sint32_field(4) - # Gte specifies that this field must be greater than or equal to the - # specified value, inclusive. If the value of Gte is larger than a specified - # Lt or Lte, the range is reversed. 
- gte: int = betterproto.sint32_field(5) - # In specifies that this field must be equal to one of the specified values - in_: List[int] = betterproto.sint32_field(6) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[int] = betterproto.sint32_field(7) - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(8) - - -@dataclass(eq=False, repr=False) -class SInt64Rules(betterproto.Message): - """SInt64Rules describes the constraints applied to `sint64` values""" - - # Const specifies that this field must be exactly the specified value - const: int = betterproto.sint64_field(1) - # Lt specifies that this field must be less than the specified value, - # exclusive - lt: int = betterproto.sint64_field(2) - # Lte specifies that this field must be less than or equal to the specified - # value, inclusive - lte: int = betterproto.sint64_field(3) - # Gt specifies that this field must be greater than the specified value, - # exclusive. If the value of Gt is larger than a specified Lt or Lte, the - # range is reversed. - gt: int = betterproto.sint64_field(4) - # Gte specifies that this field must be greater than or equal to the - # specified value, inclusive. If the value of Gte is larger than a specified - # Lt or Lte, the range is reversed. - gte: int = betterproto.sint64_field(5) - # In specifies that this field must be equal to one of the specified values - in_: List[int] = betterproto.sint64_field(6) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[int] = betterproto.sint64_field(7) - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(8) - - -@dataclass(eq=False, repr=False) -class Fixed32Rules(betterproto.Message): - """Fixed32Rules describes the constraints applied to `fixed32` values""" - - # Const specifies that this field must be exactly the specified value - const: int = betterproto.fixed32_field(1) - # Lt specifies that this field must be less than the specified value, - # exclusive - lt: int = betterproto.fixed32_field(2) - # Lte specifies that this field must be less than or equal to the specified - # value, inclusive - lte: int = betterproto.fixed32_field(3) - # Gt specifies that this field must be greater than the specified value, - # exclusive. If the value of Gt is larger than a specified Lt or Lte, the - # range is reversed. - gt: int = betterproto.fixed32_field(4) - # Gte specifies that this field must be greater than or equal to the - # specified value, inclusive. If the value of Gte is larger than a specified - # Lt or Lte, the range is reversed. 
- gte: int = betterproto.fixed32_field(5) - # In specifies that this field must be equal to one of the specified values - in_: List[int] = betterproto.fixed32_field(6) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[int] = betterproto.fixed32_field(7) - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(8) - - -@dataclass(eq=False, repr=False) -class Fixed64Rules(betterproto.Message): - """Fixed64Rules describes the constraints applied to `fixed64` values""" - - # Const specifies that this field must be exactly the specified value - const: int = betterproto.fixed64_field(1) - # Lt specifies that this field must be less than the specified value, - # exclusive - lt: int = betterproto.fixed64_field(2) - # Lte specifies that this field must be less than or equal to the specified - # value, inclusive - lte: int = betterproto.fixed64_field(3) - # Gt specifies that this field must be greater than the specified value, - # exclusive. If the value of Gt is larger than a specified Lt or Lte, the - # range is reversed. - gt: int = betterproto.fixed64_field(4) - # Gte specifies that this field must be greater than or equal to the - # specified value, inclusive. If the value of Gte is larger than a specified - # Lt or Lte, the range is reversed. - gte: int = betterproto.fixed64_field(5) - # In specifies that this field must be equal to one of the specified values - in_: List[int] = betterproto.fixed64_field(6) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[int] = betterproto.fixed64_field(7) - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(8) - - -@dataclass(eq=False, repr=False) -class SFixed32Rules(betterproto.Message): - """SFixed32Rules describes the constraints applied to `sfixed32` values""" - - # Const specifies that this field must be exactly the specified value - const: int = betterproto.sfixed32_field(1) - # Lt specifies that this field must be less than the specified value, - # exclusive - lt: int = betterproto.sfixed32_field(2) - # Lte specifies that this field must be less than or equal to the specified - # value, inclusive - lte: int = betterproto.sfixed32_field(3) - # Gt specifies that this field must be greater than the specified value, - # exclusive. If the value of Gt is larger than a specified Lt or Lte, the - # range is reversed. - gt: int = betterproto.sfixed32_field(4) - # Gte specifies that this field must be greater than or equal to the - # specified value, inclusive. If the value of Gte is larger than a specified - # Lt or Lte, the range is reversed. 
- gte: int = betterproto.sfixed32_field(5) - # In specifies that this field must be equal to one of the specified values - in_: List[int] = betterproto.sfixed32_field(6) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[int] = betterproto.sfixed32_field(7) - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(8) - - -@dataclass(eq=False, repr=False) -class SFixed64Rules(betterproto.Message): - """SFixed64Rules describes the constraints applied to `sfixed64` values""" - - # Const specifies that this field must be exactly the specified value - const: int = betterproto.sfixed64_field(1) - # Lt specifies that this field must be less than the specified value, - # exclusive - lt: int = betterproto.sfixed64_field(2) - # Lte specifies that this field must be less than or equal to the specified - # value, inclusive - lte: int = betterproto.sfixed64_field(3) - # Gt specifies that this field must be greater than the specified value, - # exclusive. If the value of Gt is larger than a specified Lt or Lte, the - # range is reversed. - gt: int = betterproto.sfixed64_field(4) - # Gte specifies that this field must be greater than or equal to the - # specified value, inclusive. If the value of Gte is larger than a specified - # Lt or Lte, the range is reversed. - gte: int = betterproto.sfixed64_field(5) - # In specifies that this field must be equal to one of the specified values - in_: List[int] = betterproto.sfixed64_field(6) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[int] = betterproto.sfixed64_field(7) - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(8) - - -@dataclass(eq=False, repr=False) -class BoolRules(betterproto.Message): - """BoolRules describes the constraints applied to `bool` values""" - - # Const specifies that this field must be exactly the specified value - const: bool = betterproto.bool_field(1) - - -@dataclass(eq=False, repr=False) -class StringRules(betterproto.Message): - """StringRules describe the constraints applied to `string` values""" - - # Const specifies that this field must be exactly the specified value - const: str = betterproto.string_field(1) - # Len specifies that this field must be the specified number of characters - # (Unicode code points). Note that the number of characters may differ from - # the number of bytes in the string. - len: int = betterproto.uint64_field(19) - # MinLen specifies that this field must be the specified number of characters - # (Unicode code points) at a minimum. Note that the number of characters may - # differ from the number of bytes in the string. - min_len: int = betterproto.uint64_field(2) - # MaxLen specifies that this field must be the specified number of characters - # (Unicode code points) at a maximum. Note that the number of characters may - # differ from the number of bytes in the string. 
- max_len: int = betterproto.uint64_field(3) - # LenBytes specifies that this field must be the specified number of bytes - len_bytes: int = betterproto.uint64_field(20) - # MinBytes specifies that this field must be the specified number of bytes at - # a minimum - min_bytes: int = betterproto.uint64_field(4) - # MaxBytes specifies that this field must be the specified number of bytes at - # a maximum - max_bytes: int = betterproto.uint64_field(5) - # Pattern specifes that this field must match against the specified regular - # expression (RE2 syntax). The included expression should elide any - # delimiters. - pattern: str = betterproto.string_field(6) - # Prefix specifies that this field must have the specified substring at the - # beginning of the string. - prefix: str = betterproto.string_field(7) - # Suffix specifies that this field must have the specified substring at the - # end of the string. - suffix: str = betterproto.string_field(8) - # Contains specifies that this field must have the specified substring - # anywhere in the string. - contains: str = betterproto.string_field(9) - # NotContains specifies that this field cannot have the specified substring - # anywhere in the string. - not_contains: str = betterproto.string_field(23) - # In specifies that this field must be equal to one of the specified values - in_: List[str] = betterproto.string_field(10) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[str] = betterproto.string_field(11) - # Email specifies that the field must be a valid email address as defined by - # RFC 5322 - email: bool = betterproto.bool_field(12, group="well_known") - # Hostname specifies that the field must be a valid hostname as defined by - # RFC 1034. This constraint does not support internationalized domain names - # (IDNs). - hostname: bool = betterproto.bool_field(13, group="well_known") - # Ip specifies that the field must be a valid IP (v4 or v6) address. Valid - # IPv6 addresses should not include surrounding square brackets. - ip: bool = betterproto.bool_field(14, group="well_known") - # Ipv4 specifies that the field must be a valid IPv4 address. - ipv4: bool = betterproto.bool_field(15, group="well_known") - # Ipv6 specifies that the field must be a valid IPv6 address. Valid IPv6 - # addresses should not include surrounding square brackets. - ipv6: bool = betterproto.bool_field(16, group="well_known") - # Uri specifies that the field must be a valid, absolute URI as defined by - # RFC 3986 - uri: bool = betterproto.bool_field(17, group="well_known") - # UriRef specifies that the field must be a valid URI as defined by RFC 3986 - # and may be relative or absolute. - uri_ref: bool = betterproto.bool_field(18, group="well_known") - # Address specifies that the field must be either a valid hostname as defined - # by RFC 1034 (which does not support internationalized domain names or - # IDNs), or it can be a valid IP (v4 or v6). - address: bool = betterproto.bool_field(21, group="well_known") - # Uuid specifies that the field must be a valid UUID as defined by RFC 4122 - uuid: bool = betterproto.bool_field(22, group="well_known") - # WellKnownRegex specifies a common well known pattern defined as a regex. - well_known_regex: "KnownRegex" = betterproto.enum_field(24, group="well_known") - # This applies to regexes HTTP_HEADER_NAME and HTTP_HEADER_VALUE to enable - # strict header validation. By default, this is true, and HTTP header - # validations are RFC-compliant. 
Setting to false will enable a looser - # validations that only disallows \r\n\0 characters, which can be used to - # bypass header matching rules. - strict: bool = betterproto.bool_field(25) - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(26) - - -@dataclass(eq=False, repr=False) -class BytesRules(betterproto.Message): - """BytesRules describe the constraints applied to `bytes` values""" - - # Const specifies that this field must be exactly the specified value - const: bytes = betterproto.bytes_field(1) - # Len specifies that this field must be the specified number of bytes - len: int = betterproto.uint64_field(13) - # MinLen specifies that this field must be the specified number of bytes at a - # minimum - min_len: int = betterproto.uint64_field(2) - # MaxLen specifies that this field must be the specified number of bytes at a - # maximum - max_len: int = betterproto.uint64_field(3) - # Pattern specifes that this field must match against the specified regular - # expression (RE2 syntax). The included expression should elide any - # delimiters. - pattern: str = betterproto.string_field(4) - # Prefix specifies that this field must have the specified bytes at the - # beginning of the string. - prefix: bytes = betterproto.bytes_field(5) - # Suffix specifies that this field must have the specified bytes at the end - # of the string. - suffix: bytes = betterproto.bytes_field(6) - # Contains specifies that this field must have the specified bytes anywhere - # in the string. - contains: bytes = betterproto.bytes_field(7) - # In specifies that this field must be equal to one of the specified values - in_: List[bytes] = betterproto.bytes_field(8) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[bytes] = betterproto.bytes_field(9) - # Ip specifies that the field must be a valid IP (v4 or v6) address in byte - # format - ip: bool = betterproto.bool_field(10, group="well_known") - # Ipv4 specifies that the field must be a valid IPv4 address in byte format - ipv4: bool = betterproto.bool_field(11, group="well_known") - # Ipv6 specifies that the field must be a valid IPv6 address in byte format - ipv6: bool = betterproto.bool_field(12, group="well_known") - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(14) - - -@dataclass(eq=False, repr=False) -class EnumRules(betterproto.Message): - """EnumRules describe the constraints applied to enum values""" - - # Const specifies that this field must be exactly the specified value - const: int = betterproto.int32_field(1) - # DefinedOnly specifies that this field must be only one of the defined - # values for this enum, failing on any undefined value. - defined_only: bool = betterproto.bool_field(2) - # In specifies that this field must be equal to one of the specified values - in_: List[int] = betterproto.int32_field(3) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[int] = betterproto.int32_field(4) - - -@dataclass(eq=False, repr=False) -class MessageRules(betterproto.Message): - """ - MessageRules describe the constraints applied to embedded message values. - For message-type fields, validation is performed recursively. 
- """ - - # Skip specifies that the validation rules of this field should not be - # evaluated - skip: bool = betterproto.bool_field(1) - # Required specifies that this field must be set - required: bool = betterproto.bool_field(2) - - -@dataclass(eq=False, repr=False) -class RepeatedRules(betterproto.Message): - """RepeatedRules describe the constraints applied to `repeated` values""" - - # MinItems specifies that this field must have the specified number of items - # at a minimum - min_items: int = betterproto.uint64_field(1) - # MaxItems specifies that this field must have the specified number of items - # at a maximum - max_items: int = betterproto.uint64_field(2) - # Unique specifies that all elements in this field must be unique. This - # contraint is only applicable to scalar and enum types (messages are not - # supported). - unique: bool = betterproto.bool_field(3) - # Items specifies the contraints to be applied to each item in the field. - # Repeated message fields will still execute validation against each item - # unless skip is specified here. - items: "FieldRules" = betterproto.message_field(4) - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(5) - - -@dataclass(eq=False, repr=False) -class MapRules(betterproto.Message): - """MapRules describe the constraints applied to `map` values""" - - # MinPairs specifies that this field must have the specified number of KVs at - # a minimum - min_pairs: int = betterproto.uint64_field(1) - # MaxPairs specifies that this field must have the specified number of KVs at - # a maximum - max_pairs: int = betterproto.uint64_field(2) - # NoSparse specifies values in this field cannot be unset. This only applies - # to map's with message value types. - no_sparse: bool = betterproto.bool_field(3) - # Keys specifies the constraints to be applied to each key in the field. - keys: "FieldRules" = betterproto.message_field(4) - # Values specifies the constraints to be applied to the value of each key in - # the field. Message values will still have their validations evaluated - # unless skip is specified here. - values: "FieldRules" = betterproto.message_field(5) - # IgnoreEmpty specifies that the validation rules of this field should be - # evaluated only if the field is not empty - ignore_empty: bool = betterproto.bool_field(6) - - -@dataclass(eq=False, repr=False) -class AnyRules(betterproto.Message): - """ - AnyRules describe constraints applied exclusively to the - `google.protobuf.Any` well-known type - """ - - # Required specifies that this field must be set - required: bool = betterproto.bool_field(1) - # In specifies that this field's `type_url` must be equal to one of the - # specified values. - in_: List[str] = betterproto.string_field(2) - # NotIn specifies that this field's `type_url` must not be equal to any of - # the specified values. 
- not_in: List[str] = betterproto.string_field(3) - - -@dataclass(eq=False, repr=False) -class DurationRules(betterproto.Message): - """ - DurationRules describe the constraints applied exclusively to the - `google.protobuf.Duration` well-known type - """ - - # Required specifies that this field must be set - required: bool = betterproto.bool_field(1) - # Const specifies that this field must be exactly the specified value - const: timedelta = betterproto.message_field(2) - # Lt specifies that this field must be less than the specified value, - # exclusive - lt: timedelta = betterproto.message_field(3) - # Lt specifies that this field must be less than the specified value, - # inclusive - lte: timedelta = betterproto.message_field(4) - # Gt specifies that this field must be greater than the specified value, - # exclusive - gt: timedelta = betterproto.message_field(5) - # Gte specifies that this field must be greater than the specified value, - # inclusive - gte: timedelta = betterproto.message_field(6) - # In specifies that this field must be equal to one of the specified values - in_: List[timedelta] = betterproto.message_field(7) - # NotIn specifies that this field cannot be equal to one of the specified - # values - not_in: List[timedelta] = betterproto.message_field(8) - - -@dataclass(eq=False, repr=False) -class TimestampRules(betterproto.Message): - """ - TimestampRules describe the constraints applied exclusively to the - `google.protobuf.Timestamp` well-known type - """ - - # Required specifies that this field must be set - required: bool = betterproto.bool_field(1) - # Const specifies that this field must be exactly the specified value - const: datetime = betterproto.message_field(2) - # Lt specifies that this field must be less than the specified value, - # exclusive - lt: datetime = betterproto.message_field(3) - # Lte specifies that this field must be less than the specified value, - # inclusive - lte: datetime = betterproto.message_field(4) - # Gt specifies that this field must be greater than the specified value, - # exclusive - gt: datetime = betterproto.message_field(5) - # Gte specifies that this field must be greater than the specified value, - # inclusive - gte: datetime = betterproto.message_field(6) - # LtNow specifies that this must be less than the current time. LtNow can - # only be used with the Within rule. - lt_now: bool = betterproto.bool_field(7) - # GtNow specifies that this must be greater than the current time. GtNow can - # only be used with the Within rule. - gt_now: bool = betterproto.bool_field(8) - # Within specifies that this field must be within this duration of the - # current time. This constraint can be used alone or with the LtNow and GtNow - # rules. - within: timedelta = betterproto.message_field(9) diff --git a/src/envoy_data_plane/xds/__init__.py b/src/envoy_data_plane/xds/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/xds/annotations/__init__.py b/src/envoy_data_plane/xds/annotations/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/xds/annotations/v3/__init__.py b/src/envoy_data_plane/xds/annotations/v3/__init__.py deleted file mode 100644 index d6b963e..0000000 --- a/src/envoy_data_plane/xds/annotations/v3/__init__.py +++ /dev/null @@ -1,45 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: xds/annotations/v3/status.proto -# plugin: python-betterproto -from dataclasses import dataclass - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class PackageVersionStatus(betterproto.Enum): - # Unknown package version status. - UNKNOWN = 0 - # This version of the package is frozen. - FROZEN = 1 - # This version of the package is the active development version. - ACTIVE = 2 - # This version of the package is the candidate for the next major version. It - # is typically machine generated from the active development version. - NEXT_MAJOR_VERSION_CANDIDATE = 3 - - -@dataclass(eq=False, repr=False) -class FileStatusAnnotation(betterproto.Message): - # The entity is work-in-progress and subject to breaking changes. - work_in_progress: bool = betterproto.bool_field(1) - - -@dataclass(eq=False, repr=False) -class MessageStatusAnnotation(betterproto.Message): - # The entity is work-in-progress and subject to breaking changes. - work_in_progress: bool = betterproto.bool_field(1) - - -@dataclass(eq=False, repr=False) -class FieldStatusAnnotation(betterproto.Message): - # The entity is work-in-progress and subject to breaking changes. - work_in_progress: bool = betterproto.bool_field(1) - - -@dataclass(eq=False, repr=False) -class StatusAnnotation(betterproto.Message): - # The entity is work-in-progress and subject to breaking changes. - work_in_progress: bool = betterproto.bool_field(1) - # The entity belongs to a package with the given version status. - package_version_status: "PackageVersionStatus" = betterproto.enum_field(2) diff --git a/src/envoy_data_plane/xds/core/__init__.py b/src/envoy_data_plane/xds/core/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/xds/core/v3/__init__.py b/src/envoy_data_plane/xds/core/v3/__init__.py deleted file mode 100644 index 4a83092..0000000 --- a/src/envoy_data_plane/xds/core/v3/__init__.py +++ /dev/null @@ -1,167 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# sources: xds/core/v3/authority.proto, xds/core/v3/collection_entry.proto, xds/core/v3/context_params.proto, xds/core/v3/extension.proto, xds/core/v3/resource_locator.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Dict, List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -class ResourceLocatorScheme(betterproto.Enum): - XDSTP = 0 - HTTP = 1 - FILE = 2 - - -@dataclass(eq=False, repr=False) -class ContextParams(betterproto.Message): - """ - Additional parameters that can be used to select resource variants. These - include any global context parameters, per-resource type client feature - capabilities and per-resource type functional attributes. All per-resource - type attributes will be `xds.resource.` prefixed and some of these are - documented below: `xds.resource.listening_address`: The value is "IP:port" - (e.g. "10.1.1.3:8080") which is the listening address of a Listener. Used - in a Listener resource query. - """ - - params: Dict[str, str] = betterproto.map_field( - 1, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - - -@dataclass(eq=False, repr=False) -class Authority(betterproto.Message): - """xDS authority information.""" - - name: str = betterproto.string_field(1) - - -@dataclass(eq=False, repr=False) -class TypedExtensionConfig(betterproto.Message): - """Message type for extension configuration.""" - - # The name of an extension. 
This is not used to select the extension, instead - # it serves the role of an opaque identifier. - name: str = betterproto.string_field(1) - # The typed config for the extension. The type URL will be used to identify - # the extension. In the case that the type URL is *xds.type.v3.TypedStruct* - # (or, for historical reasons, *udpa.type.v1.TypedStruct*), the inner type - # URL of *TypedStruct* will be utilized. See the :ref:`extension - # configuration overview ` for - # further details. - typed_config: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class ResourceLocator(betterproto.Message): - """ - xDS resource locators identify a xDS resource name and instruct the data- - plane load balancer on how the resource may be located. Resource locators - have a canonical xdstp:// URI representation: - xdstp://{authority}/{type_url}/{id}?{context_params}{#directive,*} where - context_params take the form of URI query parameters. Resource locators - have a similar canonical http:// URI representation: - http://{authority}/{type_url}/{id}?{context_params}{#directive,*} Resource - locators also have a simplified file:// URI representation: - file:///{id}{#directive,*} - """ - - # URI scheme. - scheme: "ResourceLocatorScheme" = betterproto.enum_field(1) - # Opaque identifier for the resource. Any '/' will not be escaped during URI - # encoding and will form part of the URI path. This may end with ‘*’ for glob - # collection references. - id: str = betterproto.string_field(2) - # Logical authority for resource (not necessarily transport network address). - # Authorities are opaque in the xDS API, data-plane load balancers will map - # them to concrete network transports such as an xDS management server, e.g. - # via envoy.config.core.v3.ConfigSource. - authority: str = betterproto.string_field(3) - # Fully qualified resource type (as in type URL without types.googleapis.com/ - # prefix). - resource_type: str = betterproto.string_field(4) - # Additional parameters that can be used to select resource variants. Matches - # must be exact, i.e. all context parameters must match exactly and there - # must be no additional context parameters set on the matched resource. - exact_context: "ContextParams" = betterproto.message_field( - 5, group="context_param_specifier" - ) - # A list of directives that appear in the xDS resource locator #fragment. - # When encoding to URI form, directives are percent encoded with comma - # separation. - directives: List["ResourceLocatorDirective"] = betterproto.message_field(6) - - -@dataclass(eq=False, repr=False) -class ResourceLocatorDirective(betterproto.Message): - """ - Directives provide information to data-plane load balancers on how xDS - resource names are to be interpreted and potentially further resolved. For - example, they may provide alternative resource locators for when primary - resolution fails. Directives are not part of resource names and do not - appear in a xDS transport discovery request. When encoding to URIs, - directives take the form: = For example, we can have alt=xdstp://foo/bar or - entry=some%20thing. Each directive value type may have its own string - encoding, in the case of ResourceLocator there is a recursive URI encoding. - Percent encoding applies to the URI encoding of the directive value. - Multiple directives are comma-separated, so the reserved characters that - require percent encoding in a directive value are [',', '#', '[', ']', - '%']. 
These are the RFC3986 fragment reserved characters with the addition - of the xDS scheme specific ','. See - https://tools.ietf.org/html/rfc3986#page-49 for further details on URI ABNF - and reserved characters. - """ - - # An alternative resource locator for fallback if the resource is - # unavailable. For example, take the resource locator: xdstp://foo/some- - # type/some-route-table#alt=xdstp://bar/some-type/another-route-table If the - # data-plane load balancer is unable to reach `foo` to fetch the resource, it - # will fallback to `bar`. Alternative resources do not need to have - # equivalent content, but they should be functional substitutes. - alt: "ResourceLocator" = betterproto.message_field(1, group="directive") - # List collections support inlining of resources via the entry field in - # Resource. These inlined Resource objects may have an optional name field - # specified. When specified, the entry directive allows ResourceLocator to - # directly reference these inlined resources, e.g. xdstp://.../foo#entry=bar. - entry: str = betterproto.string_field(2, group="directive") - - -@dataclass(eq=False, repr=False) -class CollectionEntry(betterproto.Message): - """ - xDS collection resource wrapper. This encapsulates a xDS resource when - appearing inside a list collection resource. List collection resources are - regular Resource messages of type: message Collection { repeated - CollectionEntry resources = 1; } - """ - - # A resource locator describing how the member resource is to be located. - locator: "ResourceLocator" = betterproto.message_field( - 1, group="resource_specifier" - ) - # The resource is inlined in the list collection. - inline_entry: "CollectionEntryInlineEntry" = betterproto.message_field( - 2, group="resource_specifier" - ) - - -@dataclass(eq=False, repr=False) -class CollectionEntryInlineEntry(betterproto.Message): - """Inlined resource entry.""" - - # Optional name to describe the inlined resource. Resource names must - # [a-zA-Z0-9_-\./]+ (TODO(htuch): turn this into a PGV constraint once - # finalized, probably should be a RFC3986 pchar). This name allows reference - # via the #entry directive in ResourceLocator. - name: str = betterproto.string_field(1) - # The resource's logical version. It is illegal to have the same named xDS - # resource name at a given version with different resource payloads. - version: str = betterproto.string_field(2) - # The resource payload, including type URL. - resource: "betterproto_lib_google_protobuf.Any" = betterproto.message_field(3) - - -import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf diff --git a/src/envoy_data_plane/xds/type/__init__.py b/src/envoy_data_plane/xds/type/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/xds/type/matcher/__init__.py b/src/envoy_data_plane/xds/type/matcher/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/envoy_data_plane/xds/type/matcher/v3/__init__.py b/src/envoy_data_plane/xds/type/matcher/v3/__init__.py deleted file mode 100644 index c0ed9da..0000000 --- a/src/envoy_data_plane/xds/type/matcher/v3/__init__.py +++ /dev/null @@ -1,217 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: xds/type/matcher/v3/matcher.proto, xds/type/matcher/v3/regex.proto, xds/type/matcher/v3/string.proto -# plugin: python-betterproto -from dataclasses import dataclass -from typing import Dict, List - -import betterproto -from betterproto.grpc.grpclib_server import ServiceBase - - -@dataclass(eq=False, repr=False) -class RegexMatcher(betterproto.Message): - """A regex matcher designed for safety when used with untrusted input.""" - - # Google's RE2 regex engine. - google_re2: "RegexMatcherGoogleRe2" = betterproto.message_field( - 1, group="engine_type" - ) - # The regex match string. The string must be supported by the configured - # engine. - regex: str = betterproto.string_field(2) - - -@dataclass(eq=False, repr=False) -class RegexMatcherGoogleRe2(betterproto.Message): - """ - Google's `RE2 `_ regex engine. The regex - string must adhere to the documented `syntax - `_. The engine is designed to - complete execution in linear time as well as limit the amount of memory - used. Envoy supports program size checking via runtime. The runtime keys - `re2.max_program_size.error_level` and `re2.max_program_size.warn_level` - can be set to integers as the maximum program size or complexity that a - compiled regex can have before an exception is thrown or a warning is - logged, respectively. `re2.max_program_size.error_level` defaults to 100, - and `re2.max_program_size.warn_level` has no default if unset (will not - check/log a warning). Envoy emits two stats for tracking the program size - of regexes: the histogram `re2.program_size`, which records the program - size, and the counter `re2.exceeded_warn_level`, which is incremented each - time the program size exceeds the warn level threshold. - """ - - pass - - -@dataclass(eq=False, repr=False) -class StringMatcher(betterproto.Message): - """Specifies the way to match a string. [#next-free-field: 8]""" - - # The input string must match exactly the string specified here. Examples: * - # *abc* only matches the value *abc*. - exact: str = betterproto.string_field(1, group="match_pattern") - # The input string must have the prefix specified here. Note: empty prefix is - # not allowed, please use regex instead. Examples: * *abc* matches the value - # *abc.xyz* - prefix: str = betterproto.string_field(2, group="match_pattern") - # The input string must have the suffix specified here. Note: empty prefix is - # not allowed, please use regex instead. Examples: * *abc* matches the value - # *xyz.abc* - suffix: str = betterproto.string_field(3, group="match_pattern") - # The input string must match the regular expression specified here. - safe_regex: "RegexMatcher" = betterproto.message_field(5, group="match_pattern") - # The input string must have the substring specified here. Note: empty - # contains match is not allowed, please use regex instead. Examples: * *abc* - # matches the value *xyz.abc.def* - contains: str = betterproto.string_field(7, group="match_pattern") - # If true, indicates the exact/prefix/suffix matching should be case - # insensitive. This has no effect for the safe_regex match. For example, the - # matcher *data* will match both input string *Data* and *data* if set to - # true. 
- ignore_case: bool = betterproto.bool_field(6) - - -@dataclass(eq=False, repr=False) -class ListStringMatcher(betterproto.Message): - """Specifies a list of ways to match a string.""" - - patterns: List["StringMatcher"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class Matcher(betterproto.Message): - """ - A matcher, which may traverse a matching tree in order to result in a match - action. During matching, the tree will be traversed until a match is found, - or if no match is found the action specified by the most specific - on_no_match will be evaluated. As an on_no_match might result in another - matching tree being evaluated, this process might repeat several times - until the final OnMatch (or no match) is decided. - """ - - # A linear list of matchers to evaluate. - matcher_list: "MatcherMatcherList" = betterproto.message_field( - 1, group="matcher_type" - ) - # A match tree to evaluate. - matcher_tree: "MatcherMatcherTree" = betterproto.message_field( - 2, group="matcher_type" - ) - # Optional OnMatch to use if no matcher above matched (e.g., if there are no - # matchers specified above, or if none of the matches specified above - # succeeded). If no matcher above matched and this field is not populated, - # the match will be considered unsuccessful. - on_no_match: "MatcherOnMatch" = betterproto.message_field(3) - - -@dataclass(eq=False, repr=False) -class MatcherOnMatch(betterproto.Message): - """What to do if a match is successful.""" - - # Nested matcher to evaluate. If the nested matcher does not match and does - # not specify on_no_match, then this matcher is considered not to have - # matched, even if a predicate at this level or above returned true. - matcher: "Matcher" = betterproto.message_field(1, group="on_match") - # Protocol-specific action to take. - action: "___core_v3__.TypedExtensionConfig" = betterproto.message_field( - 2, group="on_match" - ) - - -@dataclass(eq=False, repr=False) -class MatcherMatcherList(betterproto.Message): - """ - A linear list of field matchers. The field matchers are evaluated in order, - and the first match wins. - """ - - # A list of matchers. First match wins. - matchers: List["MatcherMatcherListFieldMatcher"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class MatcherMatcherListPredicate(betterproto.Message): - """Predicate to determine if a match is successful.""" - - # A single predicate to evaluate. - single_predicate: "MatcherMatcherListPredicateSinglePredicate" = ( - betterproto.message_field(1, group="match_type") - ) - # A list of predicates to be OR-ed together. - or_matcher: "MatcherMatcherListPredicatePredicateList" = betterproto.message_field( - 2, group="match_type" - ) - # A list of predicates to be AND-ed together. - and_matcher: "MatcherMatcherListPredicatePredicateList" = betterproto.message_field( - 3, group="match_type" - ) - # The invert of a predicate - not_matcher: "MatcherMatcherListPredicate" = betterproto.message_field( - 4, group="match_type" - ) - - -@dataclass(eq=False, repr=False) -class MatcherMatcherListPredicateSinglePredicate(betterproto.Message): - """Predicate for a single input field.""" - - # Protocol-specific specification of input field to match on. [#extension- - # category: envoy.matching.common_inputs] - input: "___core_v3__.TypedExtensionConfig" = betterproto.message_field(1) - # Built-in string matcher. - value_match: "StringMatcher" = betterproto.message_field(2, group="matcher") - # Extension for custom matching logic. 
[#extension-category: - # envoy.matching.input_matchers] - custom_match: "___core_v3__.TypedExtensionConfig" = betterproto.message_field( - 3, group="matcher" - ) - - -@dataclass(eq=False, repr=False) -class MatcherMatcherListPredicatePredicateList(betterproto.Message): - """ - A list of two or more matchers. Used to allow using a list within a oneof. - """ - - predicate: List["MatcherMatcherListPredicate"] = betterproto.message_field(1) - - -@dataclass(eq=False, repr=False) -class MatcherMatcherListFieldMatcher(betterproto.Message): - """An individual matcher.""" - - # Determines if the match succeeds. - predicate: "MatcherMatcherListPredicate" = betterproto.message_field(1) - # What to do if the match succeeds. - on_match: "MatcherOnMatch" = betterproto.message_field(2) - - -@dataclass(eq=False, repr=False) -class MatcherMatcherTree(betterproto.Message): - # Protocol-specific specification of input field to match on. - input: "___core_v3__.TypedExtensionConfig" = betterproto.message_field(1) - exact_match_map: "MatcherMatcherTreeMatchMap" = betterproto.message_field( - 2, group="tree_type" - ) - # Longest matching prefix wins. - prefix_match_map: "MatcherMatcherTreeMatchMap" = betterproto.message_field( - 3, group="tree_type" - ) - # Extension for custom matching logic. - custom_match: "___core_v3__.TypedExtensionConfig" = betterproto.message_field( - 4, group="tree_type" - ) - - -@dataclass(eq=False, repr=False) -class MatcherMatcherTreeMatchMap(betterproto.Message): - """ - A map of configured matchers. Used to allow using a map within a oneof. - """ - - map: Dict[str, "MatcherOnMatch"] = betterproto.map_field( - 1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - - -from ....core import v3 as ___core_v3__ diff --git a/utils/download_protobufs.py b/utils/download_protobufs.py index 887e53f..423c5ad 100644 --- a/utils/download_protobufs.py +++ b/utils/download_protobufs.py @@ -19,7 +19,7 @@ utf8 = "utf-8" -ENVOY_VERSION = "1.21.0" +ENVOY_VERSION = "1.24.0" proto_include = protoc.pkg_resources.resource_filename("grpc_tools", "_proto") envoy = Path("./envoy")