diff --git a/.github/workflows/eks-tunnel.yaml b/.github/workflows/eks-tunnel.yaml index a505625983..8bd59956d4 100644 --- a/.github/workflows/eks-tunnel.yaml +++ b/.github/workflows/eks-tunnel.yaml @@ -158,11 +158,7 @@ jobs: # Port forward Relay cilium hubble port-forward& sleep 10s - if ! [[ $(pgrep -f "kubectl.*port-forward.*hubble-relay" | wc -l) == 1 ]]; then - # support for native port-forwarding - # TODO: remove kubectl version after 0.16.20 release - [[ $(pgrep -f "cilium.*hubble.*port-forward" | wc -l) == 1 ]] - fi + [[ $(pgrep -f "^cilium.*hubble.*port-forward$" | wc -l) == 1 ]] # Run connectivity test cilium connectivity test --test-concurrency=3 --all-flows --collect-sysdump-on-failure --external-target amazon.com. \ diff --git a/.github/workflows/eks.yaml b/.github/workflows/eks.yaml index eaffeb7670..e742563055 100644 --- a/.github/workflows/eks.yaml +++ b/.github/workflows/eks.yaml @@ -157,11 +157,7 @@ jobs: # Port forward Relay cilium hubble port-forward& sleep 10s - if ! [[ $(pgrep -f "kubectl.*port-forward.*hubble-relay" | wc -l) == 1 ]]; then - # support for native port-forwarding - # TODO: remove kubectl version after 0.16.20 release - [[ $(pgrep -f "cilium.*hubble.*port-forward" | wc -l) == 1 ]] - fi + [[ $(pgrep -f "^cilium.*hubble.*port-forward$" | wc -l) == 1 ]] # Run connectivity test cilium connectivity test --test-concurrency=3 --all-flows --collect-sysdump-on-failure --external-target amazon.com. diff --git a/.github/workflows/gke.yaml b/.github/workflows/gke.yaml index 437f9a9837..8ed1b4a54d 100644 --- a/.github/workflows/gke.yaml +++ b/.github/workflows/gke.yaml @@ -153,11 +153,8 @@ jobs: # Port forward Relay cilium hubble port-forward& sleep 10s - if ! [[ $(pgrep -f "kubectl.*port-forward.*hubble-relay" | wc -l) == 1 ]]; then - # support for native port-forwarding - # TODO: remove kubectl version after 0.16.20 release - [[ $(pgrep -f "cilium.*hubble.*port-forward" | wc -l) == 1 ]] - fi + + [[ $(pgrep -f "^cilium.*hubble.*port-forward$" | wc -l) == 1 ]] # Run connectivity test cilium connectivity test --test-concurrency=5 --all-flows --collect-sysdump-on-failure --external-target google.com. diff --git a/.github/workflows/kind.yaml b/.github/workflows/kind.yaml index be0f54b57d..9d3a5828c1 100644 --- a/.github/workflows/kind.yaml +++ b/.github/workflows/kind.yaml @@ -82,11 +82,7 @@ jobs: run: | cilium hubble port-forward& sleep 10s - if ! [[ $(pgrep -f "kubectl.*port-forward.*hubble-relay" | wc -l) == 1 ]]; then - # support for native port-forwarding - # TODO: remove kubectl version after 0.16.20 release - [[ $(pgrep -f "cilium.*hubble.*port-forward" | wc -l) == 1 ]] - fi + [[ $(pgrep -f "^cilium.*hubble.*port-forward$" | wc -l) == 1 ]] - name: Set up external targets id: external_targets @@ -164,11 +160,7 @@ jobs: run: | cilium hubble port-forward& sleep 10s - if ! [[ $(pgrep -f "kubectl.*port-forward.*hubble-relay" | wc -l) == 1 ]]; then - # support for native port-forwarding - # TODO: remove kubectl version after 0.16.20 release - [[ $(pgrep -f "cilium.*hubble.*port-forward" | wc -l) == 1 ]] - fi + [[ $(pgrep -f "^cilium.*hubble.*port-forward$" | wc -l) == 1 ]] - name: Connectivity test run: | diff --git a/.github/workflows/multicluster.yaml b/.github/workflows/multicluster.yaml index 9ba005c7a4..561580c642 100644 --- a/.github/workflows/multicluster.yaml +++ b/.github/workflows/multicluster.yaml @@ -250,11 +250,7 @@ jobs: # Port forward Relay cilium --context "${{ steps.contexts.outputs.cluster1 }}" hubble port-forward& sleep 10s - if ! 
[[ $(pgrep -f "kubectl.*port-forward.*hubble-relay" | wc -l) == 1 ]]; then - # support for native port-forwarding - # TODO: remove kubectl version after 0.16.20 release - [[ $(pgrep -f "cilium.*hubble.*port-forward" | wc -l) == 1 ]] - fi + [[ $(pgrep -f "^cilium.*hubble.*port-forward$" | wc -l) == 1 ]] # Run connectivity test cilium --context "${{ steps.contexts.outputs.cluster1 }}" connectivity test --test-concurrency=5 \ diff --git a/README.md b/README.md index 620fe696f1..3e0e6f44a4 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ binary releases. | Release | Maintained | Compatible Cilium Versions | |------------------------------------------------------------------------|------------|----------------------------| -| [v0.16.19](https://github.com/cilium/cilium-cli/releases/tag/v0.16.19) | Yes | Cilium 1.15 and newer | +| [v0.16.20](https://github.com/cilium/cilium-cli/releases/tag/v0.16.20) | Yes | Cilium 1.15 and newer | | [v0.15.22](https://github.com/cilium/cilium-cli/releases/tag/v0.15.22) | Yes | Cilium 1.14 (*) | Note: diff --git a/RELEASE.md b/RELEASE.md index 925d50a4c0..7c395e480e 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -19,7 +19,7 @@ table](https://github.com/cilium/cilium-cli#releases) for the most recent suppor Set `RELEASE` environment variable to the new version. This variable will be used in the commands throughout the documenat to allow copy-pasting. - export RELEASE=v0.16.20 + export RELEASE=v0.16.21 ## Update local checkout diff --git a/go.mod b/go.mod index 284225595c..93d82cb1bf 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ replace ( sigs.k8s.io/controller-tools => github.com/cilium/controller-tools v0.8.0-2 ) -require github.com/cilium/cilium v1.17.0-pre.1 +require github.com/cilium/cilium v1.17.0-pre.2 require ( cel.dev/expr v0.16.0 // indirect @@ -32,11 +32,11 @@ require ( github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect - github.com/cilium/charts v0.0.0-20240926142256-e20f2b5f5344 // indirect + github.com/cilium/charts v0.0.0-20241015090923-1f4c1b5ac12a // indirect github.com/cilium/ebpf v0.16.0 // indirect - github.com/cilium/hive v0.0.0-20240926131619-aa37668760f2 // indirect + github.com/cilium/hive v0.0.0-20241021113747-bb8f3c0bede4 // indirect github.com/cilium/proxy v0.0.0-20240909042906-ae435a5bef38 // indirect - github.com/cilium/statedb v0.3.0 // indirect + github.com/cilium/statedb v0.3.2 // indirect github.com/cilium/stream v0.0.0-20240816054136-71321e385273 // indirect github.com/cilium/workerpool v1.2.0 // indirect github.com/cloudflare/cfssl v1.6.5 // indirect @@ -58,7 +58,7 @@ require ( github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect - github.com/fatih/color v1.17.0 // indirect + github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect @@ -137,9 +137,9 @@ require ( github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.20.4 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - 
github.com/prometheus/common v0.59.1 // indirect + github.com/prometheus/common v0.60.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/rubenv/sql-migrate v1.7.0 // indirect @@ -156,7 +156,7 @@ require ( github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace // indirect github.com/spf13/viper v1.19.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/vishvananda/netlink v1.3.0 // indirect + github.com/vishvananda/netlink v1.3.1-0.20241022031324-976bd8de7d81 // indirect github.com/vishvananda/netns v0.0.4 // indirect github.com/weppos/publicsuffix-go v0.30.0 // indirect github.com/x448/float16 v0.8.4 // indirect @@ -171,52 +171,53 @@ require ( go.etcd.io/etcd/client/v3 v3.5.16 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.30.0 // indirect - go.opentelemetry.io/otel/metric v1.30.0 // indirect - go.opentelemetry.io/otel/trace v1.30.0 // indirect + go.opentelemetry.io/otel v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/trace v1.31.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/dig v1.17.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect - golang.org/x/crypto v0.27.0 // indirect + golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/net v0.29.0 // indirect - golang.org/x/oauth2 v0.22.0 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/term v0.24.0 // indirect - golang.org/x/text v0.18.0 // indirect - golang.org/x/time v0.6.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/term v0.25.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/time v0.7.0 // indirect + golang.org/x/tools v0.26.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect - google.golang.org/grpc v1.67.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - helm.sh/helm/v3 v3.16.1 // indirect - k8s.io/api v0.31.1 // indirect - k8s.io/apiextensions-apiserver v0.31.1 // indirect - k8s.io/apimachinery v0.31.1 // indirect - k8s.io/apiserver v0.31.1 // indirect - k8s.io/cli-runtime v0.31.1 // indirect - k8s.io/client-go v0.31.1 // indirect - k8s.io/component-base v0.31.1 // indirect + helm.sh/helm/v3 v3.16.2 // indirect + k8s.io/api v0.31.2 // indirect + k8s.io/apiextensions-apiserver v0.31.2 // indirect + k8s.io/apimachinery v0.31.2 // indirect + k8s.io/apiserver v0.31.2 // indirect + k8s.io/cli-runtime v0.31.2 // indirect + k8s.io/client-go v0.31.2 // indirect + k8s.io/component-base v0.31.2 // indirect k8s.io/klog/v2 v2.130.1 
// indirect k8s.io/kube-openapi v0.0.0-20240423202451-8948a665c108 // indirect - k8s.io/kubectl v0.31.0 // indirect + k8s.io/kubectl v0.31.1 // indirect k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 // indirect oras.land/oras-go v1.2.5 // indirect sigs.k8s.io/controller-runtime v0.19.0 // indirect - sigs.k8s.io/gateway-api v1.2.0-rc1.0.20240923191000-5c5fc388829d // indirect + sigs.k8s.io/gateway-api v1.2.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.17.2 // indirect sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect - sigs.k8s.io/mcs-api v0.1.1-0.20240919125245-7bbb5990134a // indirect + sigs.k8s.io/mcs-api v0.1.1-0.20241002142749-eff1ba8c3ab2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 801ce2097f..5086cac10e 100644 --- a/go.sum +++ b/go.sum @@ -60,18 +60,18 @@ github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHe github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/charts v0.0.0-20240926142256-e20f2b5f5344 h1:biAN1Y6c+77q59tDU74YzXB8ffZoe2KyryMU12PWB3k= -github.com/cilium/charts v0.0.0-20240926142256-e20f2b5f5344/go.mod h1:M3C9VOlFvRzuV+a01t07Tw4uFLSfkCH3L542IWjf6BU= -github.com/cilium/cilium v1.17.0-pre.1 h1:HIJgJ8mtGrz6fgRI6YA/TPAsx2s06rmJTvmVe8RiilA= -github.com/cilium/cilium v1.17.0-pre.1/go.mod h1:OM+QqlLdnaaQiGA9/OTTeVDBLyZYtwVDIbcTMMAm1gU= +github.com/cilium/charts v0.0.0-20241015090923-1f4c1b5ac12a h1:jyDHmM2GFbdsljXhgMbJ1Hc9bdNunKtzhjgiMEBKlNA= +github.com/cilium/charts v0.0.0-20241015090923-1f4c1b5ac12a/go.mod h1:M3C9VOlFvRzuV+a01t07Tw4uFLSfkCH3L542IWjf6BU= +github.com/cilium/cilium v1.17.0-pre.2 h1:Y9J4EalQEmtInHirFRF9qK7yAid3WS8/2A3/hnEfsV4= +github.com/cilium/cilium v1.17.0-pre.2/go.mod h1:OoSUlF3lvdYOmVccOYWZnfs9tDHDyEWTtrwjlq8pDYA= github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok= github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE= -github.com/cilium/hive v0.0.0-20240926131619-aa37668760f2 h1:xZn7yvMbK1+Au6D/YLKEMdcEsyMt6Qu7CUv+yQHGqv0= -github.com/cilium/hive v0.0.0-20240926131619-aa37668760f2/go.mod h1:6tW1eCwSq8Wz8IVtpZE0MemoCWSrEOUa8aLKotmBRCo= +github.com/cilium/hive v0.0.0-20241021113747-bb8f3c0bede4 h1:dTnQNUDijFP+hf7soSZtoBYZ1OTV7qATqE+qbb//zUQ= +github.com/cilium/hive v0.0.0-20241021113747-bb8f3c0bede4/go.mod h1:pI2GJ1n3SLKIQVFrKF7W6A6gb6BQkZ+3Hp4PAEo5SuI= github.com/cilium/proxy v0.0.0-20240909042906-ae435a5bef38 h1:hbRPkcebWy1ZqqcnwiJJCFyzLa+xnXk6G/sM/eAsnrU= github.com/cilium/proxy v0.0.0-20240909042906-ae435a5bef38/go.mod h1:5L6S+WQ9v24ibJq38EMHEDljPrdx6PHqTDSHxRCJL2g= -github.com/cilium/statedb v0.3.0 h1:RpM6r1+gv8TY6V18DcrcMbGaoCBs0Vf9z7OxGCbPVaQ= -github.com/cilium/statedb v0.3.0/go.mod h1:AvMKi/i8VISTCvymtkTKSkz1uLlzuiYeaF8jvJO8ymU= +github.com/cilium/statedb v0.3.2 h1:gXjEEVv/zSNU41nHjlhOVqqpTWnMt+l+9Z+FhBnqCSk= +github.com/cilium/statedb v0.3.2/go.mod h1:KEdRTPdh54Asl6qimsUh6zFHVprL6ijp9/gCC+/3uA0= github.com/cilium/stream v0.0.0-20240816054136-71321e385273 h1:lyP0p5AW9fnNWmUcQ/BKaOmBEyZ+VWY1mGT1CFWv2b0= github.com/cilium/stream v0.0.0-20240816054136-71321e385273/go.mod h1:/e83AwqvNKpyg4n3C41qmnmj1x2G9DwzI+jb7GkF4lI= 
github.com/cilium/workerpool v1.2.0 h1:Wc2iOPTvCgWKQXeq4L5tnx4QFEI+z5q1+bSpSS0cnAY= @@ -135,8 +135,8 @@ github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= -github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= @@ -407,8 +407,8 @@ github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjz github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= -github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -416,8 +416,8 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= -github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= @@ -479,8 +479,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 
h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk= -github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= +github.com/vishvananda/netlink v1.3.1-0.20241022031324-976bd8de7d81 h1:9fkQcQYvtTr9ayFXuMfDMVuDt4+BYG9FwsGLnrBde0M= +github.com/vishvananda/netlink v1.3.1-0.20241022031324-976bd8de7d81/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/weppos/publicsuffix-go v0.12.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= @@ -531,14 +531,14 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= -go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= -go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= -go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= -go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= @@ -561,8 +561,8 @@ golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= @@ -593,11 +593,11 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -634,26 +634,26 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod 
h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -664,8 +664,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= -golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -677,13 +677,13 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= -google.golang.org/grpc v1.67.0/go.mod 
h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -692,8 +692,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -716,46 +716,46 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -helm.sh/helm/v3 v3.16.1 h1:cER6tI/8PgUAsaJaQCVBUg3VI9KN4oVaZJgY60RIc0c= -helm.sh/helm/v3 v3.16.1/go.mod h1:r+xBHHP20qJeEqtvBXMf7W35QDJnzY/eiEBzt+TfHps= +helm.sh/helm/v3 v3.16.2 h1:Y9v7ry+ubQmi+cb5zw1Llx8OKHU9Hk9NQ/+P+LGBe2o= +helm.sh/helm/v3 v3.16.2/go.mod h1:SyTXgKBjNqi2NPsHCW5dDAsHqvGIu0kdNYNH9gQaw70= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= -k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= -k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= -k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= -k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= -k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/apiserver v0.31.1 h1:Sars5ejQDCRBY5f7R3QFHdqN3s61nhkpaX8/k1iEw1c= -k8s.io/apiserver v0.31.1/go.mod h1:lzDhpeToamVZJmmFlaLwdYZwd7zB+WYRYIboqA1kGxM= -k8s.io/cli-runtime v0.31.1 h1:/ZmKhmZ6hNqDM+yf9s3Y4KEYakNXUn5sod2LWGGwCuk= -k8s.io/cli-runtime v0.31.1/go.mod h1:pKv1cDIaq7ehWGuXQ+A//1OIF+7DI+xudXtExMCbe9U= -k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= -k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= -k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8= -k8s.io/component-base v0.31.1/go.mod 
h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= +k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0= +k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apiserver v0.31.2 h1:VUzOEUGRCDi6kX1OyQ801m4A7AUPglpsmGvdsekmcI4= +k8s.io/apiserver v0.31.2/go.mod h1:o3nKZR7lPlJqkU5I3Ove+Zx3JuoFjQobGX1Gctw6XuE= +k8s.io/cli-runtime v0.31.2 h1:7FQt4C4Xnqx8V1GJqymInK0FFsoC+fAZtbLqgXYVOLQ= +k8s.io/cli-runtime v0.31.2/go.mod h1:XROyicf+G7rQ6FQJMbeDV9jqxzkWXTYD6Uxd15noe0Q= +k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= +k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= +k8s.io/component-base v0.31.2 h1:Z1J1LIaC0AV+nzcPRFqfK09af6bZ4D1nAOpWsy9owlA= +k8s.io/component-base v0.31.2/go.mod h1:9PeyyFN/drHjtJZMCTkSpQJS3U9OXORnHQqMLDz0sUQ= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240423202451-8948a665c108 h1:Q8Z7VlGhcJgBHJHYugJ/K/7iB8a2eSxCyxdVjJp+lLY= k8s.io/kube-openapi v0.0.0-20240423202451-8948a665c108/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/kubectl v0.31.0 h1:kANwAAPVY02r4U4jARP/C+Q1sssCcN/1p9Nk+7BQKVg= -k8s.io/kubectl v0.31.0/go.mod h1:pB47hhFypGsaHAPjlwrNbvhXgmuAr01ZBvAIIUaI8d4= +k8s.io/kubectl v0.31.1 h1:ih4JQJHxsEggFqDJEHSOdJ69ZxZftgeZvYo7M/cpp24= +k8s.io/kubectl v0.31.1/go.mod h1:aNuQoR43W6MLAtXQ/Bu4GDmoHlbhHKuyD49lmTC8eJM= k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= -sigs.k8s.io/gateway-api v1.2.0-rc1.0.20240923191000-5c5fc388829d h1:FZvkIACA+ke1SEbOiYyr3bfWr6QwJcrFYxEKyT6BAaQ= -sigs.k8s.io/gateway-api v1.2.0-rc1.0.20240923191000-5c5fc388829d/go.mod h1:EpNfEXNjiYfUJypf0eZ0P5iXA9ekSGWaS1WgPaM42X0= +sigs.k8s.io/gateway-api v1.2.0 h1:LrToiFwtqKTKZcZtoQPTuo3FxhrrhTgzQG0Te+YGSo8= +sigs.k8s.io/gateway-api v1.2.0/go.mod h1:EpNfEXNjiYfUJypf0eZ0P5iXA9ekSGWaS1WgPaM42X0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0= sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= -sigs.k8s.io/mcs-api v0.1.1-0.20240919125245-7bbb5990134a h1:R2c2r4UrW0aDTc79y4MLTyDvhjdD+jAFtnjLLHDiDnc= -sigs.k8s.io/mcs-api v0.1.1-0.20240919125245-7bbb5990134a/go.mod h1:x0rgWQwGd3FJzrb94BNn3Nu7YxUwBWcgjVRbkrkVy2A= 
+sigs.k8s.io/mcs-api v0.1.1-0.20241002142749-eff1ba8c3ab2 h1:kYmFRW4FG7KvgoBRdvrlhFPScYu+ZKhVt+FBRl43CPE= +sigs.k8s.io/mcs-api v0.1.1-0.20241002142749-eff1ba8c3ab2/go.mod h1:x0rgWQwGd3FJzrb94BNn3Nu7YxUwBWcgjVRbkrkVy2A= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/vendor/github.com/cilium/charts/README.md b/vendor/github.com/cilium/charts/README.md index 88fea55229..fcf29bfcba 100644 --- a/vendor/github.com/cilium/charts/README.md +++ b/vendor/github.com/cilium/charts/README.md @@ -1,6 +1,8 @@ This repository holds helm templates for the following Cilium releases: +* [v1.17.0-pre.1](https://github.com/cilium/cilium/releases/tag/v1.17.0-pre.1) (_[source](https://github.com/cilium/cilium/tree/v1.17.0-pre.1/install/kubernetes/cilium)_) * [v1.17.0-pre.0](https://github.com/cilium/cilium/releases/tag/v1.17.0-pre.0) (_[source](https://github.com/cilium/cilium/tree/v1.17.0-pre.0/install/kubernetes/cilium)_) +* [v1.16.3](https://github.com/cilium/cilium/releases/tag/v1.16.3) (_[source](https://github.com/cilium/cilium/tree/v1.16.3/install/kubernetes/cilium)_) * [v1.16.2](https://github.com/cilium/cilium/releases/tag/v1.16.2) (_[source](https://github.com/cilium/cilium/tree/v1.16.2/install/kubernetes/cilium)_) * [v1.16.1](https://github.com/cilium/cilium/releases/tag/v1.16.1) (_[source](https://github.com/cilium/cilium/tree/v1.16.1/install/kubernetes/cilium)_) * [v1.16.0](https://github.com/cilium/cilium/releases/tag/v1.16.0) (_[source](https://github.com/cilium/cilium/tree/v1.16.0/install/kubernetes/cilium)_) @@ -11,6 +13,7 @@ This repository holds helm templates for the following Cilium releases: * [v1.16.0-pre.2](https://github.com/cilium/cilium/releases/tag/v1.16.0-pre.2) (_[source](https://github.com/cilium/cilium/tree/v1.16.0-pre.2/install/kubernetes/cilium)_) * [v1.16.0-pre.1](https://github.com/cilium/cilium/releases/tag/v1.16.0-pre.1) (_[source](https://github.com/cilium/cilium/tree/v1.16.0-pre.1/install/kubernetes/cilium)_) * [v1.16.0-pre.0](https://github.com/cilium/cilium/releases/tag/v1.16.0-pre.0) (_[source](https://github.com/cilium/cilium/tree/v1.16.0-pre.0/install/kubernetes/cilium)_) +* [v1.15.10](https://github.com/cilium/cilium/releases/tag/v1.15.10) (_[source](https://github.com/cilium/cilium/tree/v1.15.10/install/kubernetes/cilium)_) * [v1.15.9](https://github.com/cilium/cilium/releases/tag/v1.15.9) (_[source](https://github.com/cilium/cilium/tree/v1.15.9/install/kubernetes/cilium)_) * [v1.15.8](https://github.com/cilium/cilium/releases/tag/v1.15.8) (_[source](https://github.com/cilium/cilium/tree/v1.15.8/install/kubernetes/cilium)_) * [v1.15.7](https://github.com/cilium/cilium/releases/tag/v1.15.7) (_[source](https://github.com/cilium/cilium/tree/v1.15.7/install/kubernetes/cilium)_) @@ -27,6 +30,7 @@ This repository holds helm templates for the following Cilium releases: * [v1.15.0-pre.2](https://github.com/cilium/cilium/releases/tag/v1.15.0-pre.2) (_[source](https://github.com/cilium/cilium/tree/v1.15.0-pre.2/install/kubernetes/cilium)_) * [v1.15.0-pre.1](https://github.com/cilium/cilium/releases/tag/v1.15.0-pre.1) (_[source](https://github.com/cilium/cilium/tree/v1.15.0-pre.1/install/kubernetes/cilium)_) * [v1.15.0-pre.0](https://github.com/cilium/cilium/releases/tag/v1.15.0-pre.0) 
(_[source](https://github.com/cilium/cilium/tree/v1.15.0-pre.0/install/kubernetes/cilium)_) +* [v1.14.16](https://github.com/cilium/cilium/releases/tag/v1.14.16) (_[source](https://github.com/cilium/cilium/tree/v1.14.16/install/kubernetes/cilium)_) * [v1.14.15](https://github.com/cilium/cilium/releases/tag/v1.14.15) (_[source](https://github.com/cilium/cilium/tree/v1.14.15/install/kubernetes/cilium)_) * [v1.14.14](https://github.com/cilium/cilium/releases/tag/v1.14.14) (_[source](https://github.com/cilium/cilium/tree/v1.14.14/install/kubernetes/cilium)_) * [v1.14.13](https://github.com/cilium/cilium/releases/tag/v1.14.13) (_[source](https://github.com/cilium/cilium/tree/v1.14.13/install/kubernetes/cilium)_) diff --git a/vendor/github.com/cilium/charts/cilium-1.14.16.tgz b/vendor/github.com/cilium/charts/cilium-1.14.16.tgz new file mode 100644 index 0000000000..1cdc316372 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.14.16.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.15.10.tgz b/vendor/github.com/cilium/charts/cilium-1.15.10.tgz new file mode 100644 index 0000000000..ed4c82fc52 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.15.10.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.16.3.tgz b/vendor/github.com/cilium/charts/cilium-1.16.3.tgz new file mode 100644 index 0000000000..eaca333a47 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.16.3.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.17.0-pre.1.tgz b/vendor/github.com/cilium/charts/cilium-1.17.0-pre.1.tgz new file mode 100644 index 0000000000..0c746409a0 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.17.0-pre.1.tgz differ diff --git a/vendor/github.com/cilium/charts/generate_helm_release.sh b/vendor/github.com/cilium/charts/generate_helm_release.sh index a4efbe5137..de0df1dcb1 100644 --- a/vendor/github.com/cilium/charts/generate_helm_release.sh +++ b/vendor/github.com/cilium/charts/generate_helm_release.sh @@ -7,7 +7,7 @@ DOCKER=${DOCKER:-docker} ORG=${ORG:-cilium} cosign() { - "${DOCKER}" run --rm gcr.io/projectsigstore/cosign:v2.2.4 "$@" + "${DOCKER}" run --rm ghcr.io/sigstore/cosign/cosign:v2.2.4 "$@" } helm() { diff --git a/vendor/github.com/cilium/charts/index.yaml b/vendor/github.com/cilium/charts/index.yaml index 8c48e04022..f24c0d23a9 100644 --- a/vendor/github.com/cilium/charts/index.yaml +++ b/vendor/github.com/cilium/charts/index.yaml @@ -1,6 +1,112 @@ apiVersion: v1 entries: cilium: + - annotations: + artifacthub.io/crds: "- kind: CiliumNetworkPolicy\n version: v2\n name: ciliumnetworkpolicies.cilium.io\n + \ displayName: Cilium Network Policy\n description: |\n Cilium Network + Policies provide additional functionality beyond what\n is provided by + standard Kubernetes NetworkPolicy such as the ability\n to allow traffic + based on FQDNs, or to filter at Layer 7.\n- kind: CiliumClusterwideNetworkPolicy\n + \ version: v2\n name: ciliumclusterwidenetworkpolicies.cilium.io\n displayName: + Cilium Clusterwide Network Policy\n description: |\n Cilium Clusterwide + Network Policies support configuring network traffic\n policiies across + the entire cluster, including applying node firewalls.\n- kind: CiliumExternalWorkload\n + \ version: v2\n name: ciliumexternalworkloads.cilium.io\n displayName: Cilium + External Workload\n description: |\n Cilium External Workload supports + configuring the ability for external\n non-Kubernetes workloads to join + the cluster.\n- kind: 
CiliumLocalRedirectPolicy\n version: v2\n name: ciliumlocalredirectpolicies.cilium.io\n + \ displayName: Cilium Local Redirect Policy\n description: |\n Cilium + Local Redirect Policy allows local redirects to be configured\n within + a node to support use cases like Node-Local DNS or KIAM.\n- kind: CiliumNode\n + \ version: v2\n name: ciliumnodes.cilium.io\n displayName: Cilium Node\n + \ description: |\n Cilium Node represents a node managed by Cilium. It + contains a\n specification to control various node specific configuration + aspects\n and a status section to represent the status of the node.\n- + kind: CiliumIdentity\n version: v2\n name: ciliumidentities.cilium.io\n + \ displayName: Cilium Identity\n description: |\n Cilium Identity allows + introspection into security identities that\n Cilium allocates which identify + sets of labels that are assigned to\n individual endpoints in the cluster.\n- + kind: CiliumEndpoint\n version: v2\n name: ciliumendpoints.cilium.io\n displayName: + Cilium Endpoint\n description: |\n Cilium Endpoint represents the status + of individual pods or nodes in\n the cluster which are managed by Cilium, + including enforcement status,\n IP addressing and whether the networking + is successfully operational.\n- kind: CiliumEndpointSlice\n version: v2alpha1\n + \ name: ciliumendpointslices.cilium.io\n displayName: Cilium Endpoint Slice\n + \ description: |\n Cilium Endpoint Slice represents the status of groups + of pods or nodes\n in the cluster which are managed by Cilium, including + enforcement status,\n IP addressing and whether the networking is successfully + operational.\n- kind: CiliumEgressGatewayPolicy\n version: v2\n name: ciliumegressgatewaypolicies.cilium.io\n + \ displayName: Cilium Egress Gateway Policy\n description: |\n Cilium + Egress Gateway Policy provides control over the way that traffic\n leaves + the cluster and which source addresses to use for that traffic.\n- kind: CiliumClusterwideEnvoyConfig\n + \ version: v2\n name: ciliumclusterwideenvoyconfigs.cilium.io\n displayName: + Cilium Clusterwide Envoy Config\n description: |\n Cilium Clusterwide + Envoy Config specifies Envoy resources and K8s service mappings\n to be + provisioned into Cilium host proxy instances in cluster context.\n- kind: + CiliumEnvoyConfig\n version: v2\n name: ciliumenvoyconfigs.cilium.io\n displayName: + Cilium Envoy Config\n description: |\n Cilium Envoy Config specifies Envoy + resources and K8s service mappings\n to be provisioned into Cilium host + proxy instances in namespace context.\n- kind: CiliumBGPPeeringPolicy\n version: + v2alpha1\n name: ciliumbgppeeringpolicies.cilium.io\n displayName: Cilium + BGP Peering Policy\n description: |\n Cilium BGP Peering Policy instructs + Cilium to create specific BGP peering\n configurations.\n- kind: CiliumBGPClusterConfig\n + \ version: v2alpha1\n name: ciliumbgpclusterconfigs.cilium.io\n displayName: + Cilium BGP Cluster Config\n description: |\n Cilium BGP Cluster Config + instructs Cilium operator to create specific BGP cluster\n configurations.\n- + kind: CiliumBGPPeerConfig\n version: v2alpha1\n name: ciliumbgppeerconfigs.cilium.io\n + \ displayName: Cilium BGP Peer Config\n description: |\n CiliumBGPPeerConfig + is a common set of BGP peer configurations. 
It can be referenced \n by + multiple peers from CiliumBGPClusterConfig.\n- kind: CiliumBGPAdvertisement\n + \ version: v2alpha1\n name: ciliumbgpadvertisements.cilium.io\n displayName: + Cilium BGP Advertisement\n description: |\n CiliumBGPAdvertisement is + used to define source of BGP advertisement as well as BGP attributes \n to + be advertised with those prefixes.\n- kind: CiliumBGPNodeConfig\n version: + v2alpha1\n name: ciliumbgpnodeconfigs.cilium.io\n displayName: Cilium BGP + Node Config\n description: |\n CiliumBGPNodeConfig is read only node specific + BGP configuration. It is constructed by Cilium operator.\n It will also + contain node local BGP state information.\n- kind: CiliumBGPNodeConfigOverride\n + \ version: v2alpha1\n name: ciliumbgpnodeconfigoverrides.cilium.io\n displayName: + Cilium BGP Node Config Override\n description: |\n CiliumBGPNodeConfigOverride + can be used to override node specific BGP configuration.\n- kind: CiliumLoadBalancerIPPool\n + \ version: v2alpha1\n name: ciliumloadbalancerippools.cilium.io\n displayName: + Cilium Load Balancer IP Pool\n description: |\n Defining a Cilium Load + Balancer IP Pool instructs Cilium to assign IPs to LoadBalancer Services.\n- + kind: CiliumNodeConfig\n version: v2alpha1\n name: ciliumnodeconfigs.cilium.io\n + \ displayName: Cilium Node Configuration\n description: |\n CiliumNodeConfig + is a list of configuration key-value pairs. It is applied to\n nodes indicated + by a label selector.\n- kind: CiliumCIDRGroup\n version: v2alpha1\n name: + ciliumcidrgroups.cilium.io\n displayName: Cilium CIDR Group\n description: + |\n CiliumCIDRGroup is a list of CIDRs that can be referenced as a single + entity from CiliumNetworkPolicies.\n- kind: CiliumL2AnnouncementPolicy\n version: + v2alpha1\n name: ciliuml2announcementpolicies.cilium.io\n displayName: Cilium + L2 Announcement Policy\n description: |\n CiliumL2AnnouncementPolicy is + a policy which determines which service IPs will be announced to\n the + local area network, by which nodes, and via which interfaces.\n- kind: CiliumPodIPPool\n + \ version: v2alpha1\n name: ciliumpodippools.cilium.io\n displayName: Cilium + Pod IP Pool\n description: |\n CiliumPodIPPool defines an IP pool that + can be used for pooled IPAM (i.e. 
the multi-pool IPAM mode).\n" + apiVersion: v2 + appVersion: 1.17.0-pre.1 + created: "2024-10-01T08:15:06.105239065Z" + description: eBPF-based Networking, Security, and Observability + digest: 414555ed00b250bbaa9edff538d1da68504a557f1c0fd165edca313bfade2d3a + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.21.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.17.0-pre.1.tgz + version: 1.17.0-pre.1 - annotations: artifacthub.io/crds: "- kind: CiliumNetworkPolicy\n version: v2\n name: ciliumnetworkpolicies.cilium.io\n \ displayName: Cilium Network Policy\n description: |\n Cilium Network @@ -107,6 +213,112 @@ entries: urls: - cilium-1.17.0-pre.0.tgz version: 1.17.0-pre.0 + - annotations: + artifacthub.io/crds: "- kind: CiliumNetworkPolicy\n version: v2\n name: ciliumnetworkpolicies.cilium.io\n + \ displayName: Cilium Network Policy\n description: |\n Cilium Network + Policies provide additional functionality beyond what\n is provided by + standard Kubernetes NetworkPolicy such as the ability\n to allow traffic + based on FQDNs, or to filter at Layer 7.\n- kind: CiliumClusterwideNetworkPolicy\n + \ version: v2\n name: ciliumclusterwidenetworkpolicies.cilium.io\n displayName: + Cilium Clusterwide Network Policy\n description: |\n Cilium Clusterwide + Network Policies support configuring network traffic\n policiies across + the entire cluster, including applying node firewalls.\n- kind: CiliumExternalWorkload\n + \ version: v2\n name: ciliumexternalworkloads.cilium.io\n displayName: Cilium + External Workload\n description: |\n Cilium External Workload supports + configuring the ability for external\n non-Kubernetes workloads to join + the cluster.\n- kind: CiliumLocalRedirectPolicy\n version: v2\n name: ciliumlocalredirectpolicies.cilium.io\n + \ displayName: Cilium Local Redirect Policy\n description: |\n Cilium + Local Redirect Policy allows local redirects to be configured\n within + a node to support use cases like Node-Local DNS or KIAM.\n- kind: CiliumNode\n + \ version: v2\n name: ciliumnodes.cilium.io\n displayName: Cilium Node\n + \ description: |\n Cilium Node represents a node managed by Cilium. 
It + contains a\n specification to control various node specific configuration + aspects\n and a status section to represent the status of the node.\n- + kind: CiliumIdentity\n version: v2\n name: ciliumidentities.cilium.io\n + \ displayName: Cilium Identity\n description: |\n Cilium Identity allows + introspection into security identities that\n Cilium allocates which identify + sets of labels that are assigned to\n individual endpoints in the cluster.\n- + kind: CiliumEndpoint\n version: v2\n name: ciliumendpoints.cilium.io\n displayName: + Cilium Endpoint\n description: |\n Cilium Endpoint represents the status + of individual pods or nodes in\n the cluster which are managed by Cilium, + including enforcement status,\n IP addressing and whether the networking + is successfully operational.\n- kind: CiliumEndpointSlice\n version: v2alpha1\n + \ name: ciliumendpointslices.cilium.io\n displayName: Cilium Endpoint Slice\n + \ description: |\n Cilium Endpoint Slice represents the status of groups + of pods or nodes\n in the cluster which are managed by Cilium, including + enforcement status,\n IP addressing and whether the networking is successfully + operational.\n- kind: CiliumEgressGatewayPolicy\n version: v2\n name: ciliumegressgatewaypolicies.cilium.io\n + \ displayName: Cilium Egress Gateway Policy\n description: |\n Cilium + Egress Gateway Policy provides control over the way that traffic\n leaves + the cluster and which source addresses to use for that traffic.\n- kind: CiliumClusterwideEnvoyConfig\n + \ version: v2\n name: ciliumclusterwideenvoyconfigs.cilium.io\n displayName: + Cilium Clusterwide Envoy Config\n description: |\n Cilium Clusterwide + Envoy Config specifies Envoy resources and K8s service mappings\n to be + provisioned into Cilium host proxy instances in cluster context.\n- kind: + CiliumEnvoyConfig\n version: v2\n name: ciliumenvoyconfigs.cilium.io\n displayName: + Cilium Envoy Config\n description: |\n Cilium Envoy Config specifies Envoy + resources and K8s service mappings\n to be provisioned into Cilium host + proxy instances in namespace context.\n- kind: CiliumBGPPeeringPolicy\n version: + v2alpha1\n name: ciliumbgppeeringpolicies.cilium.io\n displayName: Cilium + BGP Peering Policy\n description: |\n Cilium BGP Peering Policy instructs + Cilium to create specific BGP peering\n configurations.\n- kind: CiliumBGPClusterConfig\n + \ version: v2alpha1\n name: ciliumbgpclusterconfigs.cilium.io\n displayName: + Cilium BGP Cluster Config\n description: |\n Cilium BGP Cluster Config + instructs Cilium operator to create specific BGP cluster\n configurations.\n- + kind: CiliumBGPPeerConfig\n version: v2alpha1\n name: ciliumbgppeerconfigs.cilium.io\n + \ displayName: Cilium BGP Peer Config\n description: |\n CiliumBGPPeerConfig + is a common set of BGP peer configurations. It can be referenced \n by + multiple peers from CiliumBGPClusterConfig.\n- kind: CiliumBGPAdvertisement\n + \ version: v2alpha1\n name: ciliumbgpadvertisements.cilium.io\n displayName: + Cilium BGP Advertisement\n description: |\n CiliumBGPAdvertisement is + used to define source of BGP advertisement as well as BGP attributes \n to + be advertised with those prefixes.\n- kind: CiliumBGPNodeConfig\n version: + v2alpha1\n name: ciliumbgpnodeconfigs.cilium.io\n displayName: Cilium BGP + Node Config\n description: |\n CiliumBGPNodeConfig is read only node specific + BGP configuration. 
It is constructed by Cilium operator.\n It will also + contain node local BGP state information.\n- kind: CiliumBGPNodeConfigOverride\n + \ version: v2alpha1\n name: ciliumbgpnodeconfigoverrides.cilium.io\n displayName: + Cilium BGP Node Config Override\n description: |\n CiliumBGPNodeConfigOverride + can be used to override node specific BGP configuration.\n- kind: CiliumLoadBalancerIPPool\n + \ version: v2alpha1\n name: ciliumloadbalancerippools.cilium.io\n displayName: + Cilium Load Balancer IP Pool\n description: |\n Defining a Cilium Load + Balancer IP Pool instructs Cilium to assign IPs to LoadBalancer Services.\n- + kind: CiliumNodeConfig\n version: v2alpha1\n name: ciliumnodeconfigs.cilium.io\n + \ displayName: Cilium Node Configuration\n description: |\n CiliumNodeConfig + is a list of configuration key-value pairs. It is applied to\n nodes indicated + by a label selector.\n- kind: CiliumCIDRGroup\n version: v2alpha1\n name: + ciliumcidrgroups.cilium.io\n displayName: Cilium CIDR Group\n description: + |\n CiliumCIDRGroup is a list of CIDRs that can be referenced as a single + entity from CiliumNetworkPolicies.\n- kind: CiliumL2AnnouncementPolicy\n version: + v2alpha1\n name: ciliuml2announcementpolicies.cilium.io\n displayName: Cilium + L2 Announcement Policy\n description: |\n CiliumL2AnnouncementPolicy is + a policy which determines which service IPs will be announced to\n the + local area network, by which nodes, and via which interfaces.\n- kind: CiliumPodIPPool\n + \ version: v2alpha1\n name: ciliumpodippools.cilium.io\n displayName: Cilium + Pod IP Pool\n description: |\n CiliumPodIPPool defines an IP pool that + can be used for pooled IPAM (i.e. the multi-pool IPAM mode).\n" + apiVersion: v2 + appVersion: 1.16.3 + created: "2024-10-11T23:02:59.494005644Z" + description: eBPF-based Networking, Security, and Observability + digest: e1be328218c74bd2bed91f996d8c1b10e785715ce53299a392f79b0cef796805 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.21.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.16.3.tgz + version: 1.16.3 - annotations: artifacthub.io/crds: "- kind: CiliumNetworkPolicy\n version: v2\n name: ciliumnetworkpolicies.cilium.io\n \ displayName: Cilium Network Policy\n description: |\n Cilium Network @@ -1273,6 +1485,112 @@ entries: urls: - cilium-1.16.0-dev.tgz version: 1.16.0-dev + - annotations: + artifacthub.io/crds: "- kind: CiliumNetworkPolicy\n version: v2\n name: ciliumnetworkpolicies.cilium.io\n + \ displayName: Cilium Network Policy\n description: |\n Cilium Network + Policies provide additional functionality beyond what\n is provided by + standard Kubernetes NetworkPolicy such as the ability\n to allow traffic + based on FQDNs, or to filter at Layer 7.\n- kind: CiliumClusterwideNetworkPolicy\n + \ version: v2\n name: ciliumclusterwidenetworkpolicies.cilium.io\n displayName: + Cilium Clusterwide Network Policy\n description: |\n Cilium Clusterwide + Network Policies support configuring network traffic\n policiies across + the entire cluster, including applying node firewalls.\n- kind: CiliumExternalWorkload\n + \ version: v2\n name: ciliumexternalworkloads.cilium.io\n displayName: Cilium + External Workload\n description: |\n Cilium External Workload supports + configuring the ability for external\n non-Kubernetes 
workloads to join + the cluster.\n- kind: CiliumLocalRedirectPolicy\n version: v2\n name: ciliumlocalredirectpolicies.cilium.io\n + \ displayName: Cilium Local Redirect Policy\n description: |\n Cilium + Local Redirect Policy allows local redirects to be configured\n within + a node to support use cases like Node-Local DNS or KIAM.\n- kind: CiliumNode\n + \ version: v2\n name: ciliumnodes.cilium.io\n displayName: Cilium Node\n + \ description: |\n Cilium Node represents a node managed by Cilium. It + contains a\n specification to control various node specific configuration + aspects\n and a status section to represent the status of the node.\n- + kind: CiliumIdentity\n version: v2\n name: ciliumidentities.cilium.io\n + \ displayName: Cilium Identity\n description: |\n Cilium Identity allows + introspection into security identities that\n Cilium allocates which identify + sets of labels that are assigned to\n individual endpoints in the cluster.\n- + kind: CiliumEndpoint\n version: v2\n name: ciliumendpoints.cilium.io\n displayName: + Cilium Endpoint\n description: |\n Cilium Endpoint represents the status + of individual pods or nodes in\n the cluster which are managed by Cilium, + including enforcement status,\n IP addressing and whether the networking + is successfully operational.\n- kind: CiliumEndpointSlice\n version: v2alpha1\n + \ name: ciliumendpointslices.cilium.io\n displayName: Cilium Endpoint Slice\n + \ description: |\n Cilium Endpoint Slice represents the status of groups + of pods or nodes\n in the cluster which are managed by Cilium, including + enforcement status,\n IP addressing and whether the networking is successfully + operational.\n- kind: CiliumEgressGatewayPolicy\n version: v2\n name: ciliumegressgatewaypolicies.cilium.io\n + \ displayName: Cilium Egress Gateway Policy\n description: |\n Cilium + Egress Gateway Policy provides control over the way that traffic\n leaves + the cluster and which source addresses to use for that traffic.\n- kind: CiliumClusterwideEnvoyConfig\n + \ version: v2\n name: ciliumclusterwideenvoyconfigs.cilium.io\n displayName: + Cilium Clusterwide Envoy Config\n description: |\n Cilium Clusterwide + Envoy Config specifies Envoy resources and K8s service mappings\n to be + provisioned into Cilium host proxy instances in cluster context.\n- kind: + CiliumEnvoyConfig\n version: v2\n name: ciliumenvoyconfigs.cilium.io\n displayName: + Cilium Envoy Config\n description: |\n Cilium Envoy Config specifies Envoy + resources and K8s service mappings\n to be provisioned into Cilium host + proxy instances in namespace context.\n- kind: CiliumBGPPeeringPolicy\n version: + v2alpha1\n name: ciliumbgppeeringpolicies.cilium.io\n displayName: Cilium + BGP Peering Policy\n description: |\n Cilium BGP Peering Policy instructs + Cilium to create specific BGP peering\n configurations.\n- kind: CiliumBGPClusterConfig\n + \ version: v2alpha1\n name: ciliumbgpclusterconfigs.cilium.io\n displayName: + Cilium BGP Cluster Config\n description: |\n Cilium BGP Cluster Config + instructs Cilium operator to create specific BGP cluster\n configurations.\n- + kind: CiliumBGPPeerConfig\n version: v2alpha1\n name: ciliumbgppeerconfigs.cilium.io\n + \ displayName: Cilium BGP Peer Config\n description: |\n CiliumBGPPeerConfig + is a common set of BGP peer configurations. 
It can be referenced \n by + multiple peers from CiliumBGPClusterConfig.\n- kind: CiliumBGPAdvertisement\n + \ version: v2alpha1\n name: ciliumbgpadvertisements.cilium.io\n displayName: + Cilium BGP Advertisement\n description: |\n CiliumBGPAdvertisement is + used to define source of BGP advertisement as well as BGP attributes \n to + be advertised with those prefixes.\n- kind: CiliumBGPNodeConfig\n version: + v2alpha1\n name: ciliumbgpnodeconfigs.cilium.io\n displayName: Cilium BGP + Node Config\n description: |\n CiliumBGPNodeConfig is read only node specific + BGP configuration. It is constructed by Cilium operator.\n It will also + contain node local BGP state information.\n- kind: CiliumBGPNodeConfigOverride\n + \ version: v2alpha1\n name: ciliumbgpnodeconfigoverrides.cilium.io\n displayName: + Cilium BGP Node Config Override\n description: |\n CiliumBGPNodeConfigOverride + can be used to override node specific BGP configuration.\n- kind: CiliumLoadBalancerIPPool\n + \ version: v2alpha1\n name: ciliumloadbalancerippools.cilium.io\n displayName: + Cilium Load Balancer IP Pool\n description: |\n Defining a Cilium Load + Balancer IP Pool instructs Cilium to assign IPs to LoadBalancer Services.\n- + kind: CiliumNodeConfig\n version: v2alpha1\n name: ciliumnodeconfigs.cilium.io\n + \ displayName: Cilium Node Configuration\n description: |\n CiliumNodeConfig + is a list of configuration key-value pairs. It is applied to\n nodes indicated + by a label selector.\n- kind: CiliumCIDRGroup\n version: v2alpha1\n name: + ciliumcidrgroups.cilium.io\n displayName: Cilium CIDR Group\n description: + |\n CiliumCIDRGroup is a list of CIDRs that can be referenced as a single + entity from CiliumNetworkPolicies.\n- kind: CiliumL2AnnouncementPolicy\n version: + v2alpha1\n name: ciliuml2announcementpolicies.cilium.io\n displayName: Cilium + L2 Announcement Policy\n description: |\n CiliumL2AnnouncementPolicy is + a policy which determines which service IPs will be announced to\n the + local area network, by which nodes, and via which interfaces.\n- kind: CiliumPodIPPool\n + \ version: v2alpha1\n name: ciliumpodippools.cilium.io\n displayName: Cilium + Pod IP Pool\n description: |\n CiliumPodIPPool defines an IP pool that + can be used for pooled IPAM (i.e. the multi-pool IPAM mode).\n" + apiVersion: v2 + appVersion: 1.15.10 + created: "2024-10-11T23:00:04.066907249Z" + description: eBPF-based Networking, Security, and Observability + digest: c8bbcb8d5a7e566c05a9e827108f97b5baa9f74e28f16d551a983d42a57ff94f + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.16.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.15.10.tgz + version: 1.15.10 - annotations: artifacthub.io/crds: "- kind: CiliumNetworkPolicy\n version: v2\n name: ciliumnetworkpolicies.cilium.io\n \ displayName: Cilium Network Policy\n description: |\n Cilium Network @@ -3171,6 +3489,151 @@ entries: description: | CiliumPodIPPool defines an IP pool that can be used for pooled IPAM (i.e. the multi-pool IPAM mode). 
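For context, the chart entries added in this file follow the standard Helm repository index layout: entries keyed by chart name, each carrying a list of versions with appVersion, digest, kubeVersion, urls, and annotations such as artifacthub.io/crds. A minimal Go sketch of reading those fields from a locally downloaded copy of the index; the index.yaml path is a placeholder and the struct fields simply mirror the entries shown in this diff, so treat it as illustrative rather than part of the change:

package main

import (
	"fmt"
	"log"
	"os"

	"gopkg.in/yaml.v3"
)

type chartVersion struct {
	Version     string            `yaml:"version"`
	AppVersion  string            `yaml:"appVersion"`
	Digest      string            `yaml:"digest"`
	KubeVersion string            `yaml:"kubeVersion"`
	URLs        []string          `yaml:"urls"`
	Annotations map[string]string `yaml:"annotations"`
}

type repoIndex struct {
	APIVersion string                    `yaml:"apiVersion"`
	Entries    map[string][]chartVersion `yaml:"entries"`
}

func main() {
	// Hypothetical local copy of the repository index being diffed here.
	data, err := os.ReadFile("index.yaml")
	if err != nil {
		log.Fatal(err)
	}
	var idx repoIndex
	if err := yaml.Unmarshal(data, &idx); err != nil {
		log.Fatal(err)
	}
	for _, v := range idx.Entries["cilium"] {
		// The artifacthub.io/crds annotation is itself a YAML document listing
		// the CRDs shipped with that chart version, as seen in the entries above.
		crds := v.Annotations["artifacthub.io/crds"]
		fmt.Printf("cilium %s (app %s, digest %s): %d bytes of CRD metadata\n",
			v.Version, v.AppVersion, v.Digest, len(crds))
	}
}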
apiVersion: v2 + appVersion: 1.14.16 + created: "2024-10-11T21:38:37.489766156Z" + description: eBPF-based Networking, Security, and Observability + digest: b6b755176cb61d31b32d5107bedffef51b1d2936d6de5eed6036815fca6834e4 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.16.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.14.16.tgz + version: 1.14.16 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicies.cilium.io + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicies.cilium.io + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumexternalworkloads.cilium.io + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicies.cilium.io + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnodes.cilium.io + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentities.cilium.io + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoints.cilium.io + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. + - kind: CiliumEndpointSlice + version: v2alpha1 + name: ciliumendpointslices.cilium.io + displayName: Cilium Endpoint Slice + description: | + Cilium Endpoint Slice represents the status of groups of pods or nodes + in the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. + - kind: CiliumEgressGatewayPolicy + version: v2 + name: ciliumegressgatewaypolicies.cilium.io + displayName: Cilium Egress Gateway Policy + description: | + Cilium Egress Gateway Policy provides control over the way that traffic + leaves the cluster and which source addresses to use for that traffic. 
+ - kind: CiliumClusterwideEnvoyConfig + version: v2 + name: ciliumclusterwideenvoyconfigs.cilium.io + displayName: Cilium Clusterwide Envoy Config + description: | + Cilium Clusterwide Envoy Config specifies Envoy resources and K8s service mappings + to be provisioned into Cilium host proxy instances in cluster context. + - kind: CiliumEnvoyConfig + version: v2 + name: ciliumenvoyconfigs.cilium.io + displayName: Cilium Envoy Config + description: | + Cilium Envoy Config specifies Envoy resources and K8s service mappings + to be provisioned into Cilium host proxy instances in namespace context. + - kind: CiliumBGPPeeringPolicy + version: v2alpha1 + name: ciliumbgppeeringpolicies.cilium.io + displayName: Cilium BGP Peering Policy + description: | + Cilium BGP Peering Policy instructs Cilium to create specific BGP peering + configurations. + - kind: CiliumLoadBalancerIPPool + version: v2alpha1 + name: ciliumloadbalancerippools.cilium.io + displayName: Cilium Load Balancer IP Pool + description: | + Defining a Cilium Load Balancer IP Pool instructs Cilium to assign IPs to LoadBalancer Services. + - kind: CiliumNodeConfig + version: v2alpha1 + name: ciliumnodeconfigs.cilium.io + displayName: Cilium Node Configuration + description: | + CiliumNodeConfig is a list of configuration key-value pairs. It is applied to + nodes indicated by a label selector. + - kind: CiliumCIDRGroup + version: v2alpha1 + name: ciliumcidrgroups.cilium.io + displayName: Cilium CIDR Group + description: | + CiliumCIDRGroup is a list of CIDRs that can be referenced as a single entity from CiliumNetworkPolicies. + - kind: CiliumL2AnnouncementPolicy + version: v2alpha1 + name: ciliuml2announcementpolicies.cilium.io + displayName: Cilium L2 Announcement Policy + description: | + CiliumL2AnnouncementPolicy is a policy which determines which service IPs will be announced to + the local area network, by which nodes, and via which interfaces. + - kind: CiliumPodIPPool + version: v2alpha1 + name: ciliumpodippools.cilium.io + displayName: Cilium Pod IP Pool + description: | + CiliumPodIPPool defines an IP pool that can be used for pooled IPAM (i.e. the multi-pool IPAM mode). 
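The artifacthub.io/crds annotation above enumerates the Cilium CRDs shipped with each chart version: API group cilium.io, versions v2 or v2alpha1, and plural resource names such as ciliumnetworkpolicies. A minimal sketch of listing one of those resources with the Kubernetes dynamic client, assuming a reachable cluster with Cilium installed and the default kubeconfig location; this is illustrative only and not something this change introduces:

package main

import (
	"context"
	"fmt"
	"log"
	"path/filepath"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
)

func main() {
	// Assumes ~/.kube/config; error handling kept minimal for brevity.
	kubeconfig := filepath.Join(homedir.HomeDir(), ".kube", "config")
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		log.Fatal(err)
	}
	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Group/version/resource taken from the CRD metadata above:
	// ciliumnetworkpolicies.cilium.io, version v2.
	gvr := schema.GroupVersionResource{Group: "cilium.io", Version: "v2", Resource: "ciliumnetworkpolicies"}
	list, err := client.Resource(gvr).Namespace(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, item := range list.Items {
		fmt.Printf("%s/%s\n", item.GetNamespace(), item.GetName())
	}
}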
+ apiVersion: v2 appVersion: 1.14.15 created: "2024-09-26T11:36:56.83422162Z" description: eBPF-based Networking, Security, and Observability @@ -20091,4 +20554,4 @@ entries: urls: - tetragon-0.8.0.tgz version: 0.8.0 -generated: "2024-09-26T11:51:56.299230787Z" +generated: "2024-10-11T23:02:59.483940884Z" diff --git a/vendor/github.com/cilium/cilium/AUTHORS b/vendor/github.com/cilium/cilium/AUTHORS index 58d66d52de..77b10f4780 100644 --- a/vendor/github.com/cilium/cilium/AUTHORS +++ b/vendor/github.com/cilium/cilium/AUTHORS @@ -13,6 +13,7 @@ Aditi Ghag aditi@cilium.io Aditya Kumar aditya.kumar60@infosys.com Aditya Purandare aditya.p1993@hotmail.com Aditya Sharma aditya.sharma@shopify.com +Adrian Berger adrian.berger@bedag.ch Adrien Trouillaud adrienjt@users.noreply.github.com Ahmed Bebars 1381372+abebars@users.noreply.github.com Akhil Velagapudi 4@4khil.com @@ -25,6 +26,7 @@ Alexander Alemayhu alexander@alemayhu.com Alexander Berger alex-berger@gmx.ch Alexander Block ablock84@gmail.com Alexander Demichev demichev.alexander@gmail.com +Alexandre Barone abalexandrebarone@gmail.com Alexandre Perrin alex@isovalent.com Alexei Starovoitov alexei.starovoitov@gmail.com Alexey Grevtsev alexey.grevtcev@gmail.com @@ -32,6 +34,7 @@ Alex Katsman alexkats@google.com Alex Romanov alex@romanov.ws Alex Szakaly alex.szakaly@gmail.com Alex Waring alex.waring@starlingbank.com +alisdairbr alisdairbr@users.noreply.github.com Alkama Hasan gl3118@myamu.ac.in Alois Petutschnig alois@petutschnig.net Alvaro Uria alvaro.uria@isovalent.com @@ -43,6 +46,7 @@ Amre Shakimov amre@covalent.io Anderson, David L david.l.anderson@intel.com Andor Nemeth andor_nemeth@swissre.com Andreas Mårtensson andreas@addem.se +André Costa ancosta@gmail.com Andree Klattenhoff mail@andr.ee Andrei Kvapil kvapss@gmail.com André Martins andre@cilium.io @@ -56,7 +60,7 @@ Andrey Devyatkin andrey.devyatkin@fivexl.io Andrey Klimentyev andrey.klimentyev@flant.com Andrey Maltsev maltsev.andrey@gmail.com Andrey Voronkov voronkovaa@gmail.com -Andrii Iuspin andrii.iuspin@isovalent.com +Andrii Iuspin 57713382+ayuspin@users.noreply.github.com Andrzej Mamak nqaegg@gmail.com Andy Allred andy@punasusi.com andychuang andy.chuang@shoplineapp.com @@ -83,8 +87,10 @@ Archana Shinde archana.m.shinde@intel.com Archer Wu archerwu9425@icloud.com Ardika Bagus me@ardikabs.com Arika Chen eaglesora@gmail.com +Arkadiusz Kaliwoda (akaliwod) akaliwod@cisco.com Arnaud Meukam ameukam@gmail.com Arseniy Belorukov a.belorukov@team.bumble.com +Artem Tokarev enjoy1288@gmail.com Arthur Chiao arthurchiao@hotmail.com ArthurChiao arthurchiao@hotmail.com Arthur Evstifeev mail@ap4y.me @@ -180,6 +186,7 @@ Cory Snyder csnyder@1111systems.com Craig Box craig.box@gmail.com crashiura crashiura@gmail.com cui fliter imcusg@gmail.com +cx 1249843194@qq.com Cynthia Thomas cynthia@covalent.io Cyril Corbon corboncyril@gmail.com Cyril Scetbon cscetbon@gmail.com @@ -258,6 +265,7 @@ Dylan Reimerink dylan.reimerink@isovalent.com Ekene Nwobodo nwobodoe71@gmail.com Electron alokaks601@gmail.com El-Fadel Bonfoh elfadel@accuknox.com +Elias Hernandez elirayhernandez@gmail.com eliranw 39266788+eliranw@users.noreply.github.com Ellie Springsteen ellie.springsteen@appian.com Eloy Coto eloy.coto@acalustra.com @@ -411,6 +419,7 @@ John Fastabend john.fastabend@gmail.com John Gardiner Myers jgmyers@proofpoint.com John Howard howardjohn@google.com John Karoyannis karoyannis@yahoo.com +john-r-swyftx john.roche@swyftx.com.au John Watson johnw@planetscale.com John Zheng johnzhengaz@gmail.com Jomen Xiao 
jomenxiao@gmail.com @@ -426,6 +435,7 @@ Joseph-Irving joseph.irving500@gmail.com Joseph Ligier joseph.ligier@accenture.com Joseph Sheng jiajun.sheng@microfocus.com Joseph Stevens thejosephstevens@gmail.com +Josh Soref 2119212+jsoref@users.noreply.github.com joshua 54235339+sujoshua@users.noreply.github.com Joshua Roppo joshroppo@gmail.com jshr-w shjayaraman@microsoft.com @@ -550,6 +560,7 @@ Matt Anderson matanderson@equinix.com Matthew Fenwick mfenwick100@gmail.com Matthew Gumport me@gum.pt Matthew Hembree 47449406+matthewhembree@users.noreply.github.com +Matthias Baur m.baur@syseleven.de Matthieu Antoine matthieu.antoine@jumo.world Matthieu MOREL matthieu.morel35@gmail.com Matt Layher mdlayher@gmail.com @@ -613,6 +624,7 @@ Nick Young nick@isovalent.com Niclas Mietz solidnerd@users.noreply.github.com Nico Berlee nico.berlee@on2it.net Nicolas Busseneau nicolas@isovalent.com +Nicolò Ciraci ciraci.nicolo@gmail.com Nico Vibert nicolas.vibert@isovalent.com Nikhil Jha nikhiljha@users.noreply.github.com Nikhil Sharma nikhilsharma230303@gmail.com @@ -639,9 +651,10 @@ Oliver Wang a0924100192@gmail.com Omar Aloraini ooraini.dev@gmail.com Ondrej Blazek ondrej.blazek@firma.seznam.cz Ondrej Sika ondrej@ondrejsika.com +oneumyvakin oneumyvaking@mail.ru Oshan Galwaduge oshan304@gmail.com Osthues osthues.matthias@gmail.com -Ovidiu Tirla ovi2022@gmail.com +Ovidiu Tirla otirla@google.com Pablo Ruiz pablo.ruiz@gmail.com Paco Xu paco.xu@daocloud.io Parth Patel parth.psu@gmail.com @@ -660,11 +673,13 @@ Paulo Gomes pjbgf@linux.com Pavel Pavlov 40396270+PavelPavlov46@users.noreply.github.com Pavel Tishkov pavel.tishkov@flant.com Paweł Prażak pawelprazak@users.noreply.github.com +Pedro Ignacio pedroig100.pi@gmail.com Peiqi Shi uestc.shi@gmail.com Pelle van Gils pelle@vangils.dev pengbinbin1 pengbiny@163.com Pengfei Song pengfei.song@daocloud.io Peter Jausovec peter.jausovec@solo.io +Peter Matulis pmatulis@gmail.com Peter Slovak slovak.peto@gmail.com Petr Baloun petr.baloun@firma.seznam.cz Philippe Lafoucrière philippe.lafoucriere@gmail.com @@ -672,6 +687,7 @@ Philipp Gniewosz philipp.gniewosz@daimlertruck.com Philip Schmid phisch@cisco.com Pierre-Yves Aillet pyaillet@gmail.com Pieter van der Giessen pieter@pionative.com +Pooja Trivedi poojatrivedi@gmail.com Prabhakhar Kaliyamurthy (PK) prabhakhar@gmail.com Pranavi Roy pranvyr@gmail.com Prashanth.B beeps@google.com @@ -682,6 +698,7 @@ Priya Sharma Priya.Sharma6693@gmail.com Qasim Sarfraz qasim.sarfraz@esailors.de Qifeng Guo qifeng.guo@daocloud.io Qingchuan Hao qinhao@microsoft.com +Quan Wei quanwei.153@bytedance.com Quentin Monnet qmo@qmon.net Raam ram29@bskyb.com Rachid Zarouali rachid.zarouali@sevensphere.io @@ -727,12 +744,15 @@ Roman Ptitcyn romanspb@yahoo.com Romuald Zdebskiy zdebskiy@hotmail.com Ronald van Zantvoort the.loeki@gmail.com Ross Guarino rssguar@gmail.com +roykharman roykharman@gmail.com Rui Chen rui@chenrui.dev Rui Gu rui@covalent.io Rushikesh Butley rushikeshbutley@gmail.com Russell Bryant russell@russellbryant.net +rusttech gopher@before.tech Ryan Drew ryan.drew@isovalent.com Ryan McNamara rmcnamara@palantir.com +ryebridge 88094554+ryebridge@users.noreply.github.com Sachin Maurya sachin.maurya7666@gmail.com Sadik Kuzu sadikkuzu@hotmail.com Sahid Orentino Ferdjaoui sahid.ferdjaoui@industrialdiscipline.com @@ -773,6 +793,7 @@ Simone Magnani simone.magnani@isovalent.com Simone Sciarrati s.sciarrati@gmail.com Simon Felding 45149055+simonfelding@users.noreply.github.com Simon Gerber simon.gerber@vshn.ch +Simon Lackerbauer mail@ciil.io Simon Pasquier 
spasquier@mirantis.com Sjouke de Vries info@sdvservices.nl SkalaNetworks contact@skala.network @@ -781,6 +802,7 @@ Smaine Kahlouch smainklh@gmail.com soggiest nicholas@isovalent.com Song 1120344670@qq.com spacewander spacewanderlzx@gmail.com +Sridhar K N Rao sridharkn@u.nus.edu ssttehrani ssttehrani@gmail.com Stacy Kim stacy.kim@ucla.edu Stefan Zwanenburg stefan@zwanenburg.info @@ -816,11 +838,13 @@ Thiago Navarro navarro@accuknox.com Thi Van Le vannnyle@gmail.com Thomas Bachman tbachman@yahoo.com Thomas Balthazar thomas@balthazar.info +thomas.chen thomas.chen@trustasia.com Thomas Gosteli thomas.gosteli@protonmail.com Thomas Graf thomas@cilium.io Thorben von Hacht tvonhacht@apple.com Thorsten Pfister thorsten.pfister@form3.tech tigerK yanru.lv@daocloud.io +Tilusch til.heini@swisscom.com Tim Horner timothy.horner@isovalent.com Timo Beckers timo@isovalent.com Timo Reimann ttr314@googlemail.com @@ -842,6 +866,7 @@ Tony Lu tonylu@linux.alibaba.com Tony Norlin tony.norlin@localdomain.se Torben Tretau torben@tretau.net Tore S. Loenoey tore.lonoy@gmail.com +ToroNZ tomas-github@maggio.nz toVersus toversus2357@gmail.com Travis Glenn Hansen travisghansen@yahoo.com Trevor Roberts Jr Trevor.Roberts.Jr@gmail.com @@ -856,6 +881,7 @@ Vadim Ponomarev velizarx@gmail.com vakr vakr@microsoft.com Valas Valancius valas@google.com Vance Li vanceli@tencent.com +Vanilla osu_Vanilla@126.com Vigneshwaren Sunder vickymailed@gmail.com Viktor Kurchenko viktor.kurchenko@isovalent.com Viktor Kuzmin kvaster@gmail.com diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go b/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go index bedbb29a45..8dfda1ba61 100644 --- a/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go +++ b/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
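// Editorial aside, not part of the generated sources: the hunks below come from
// regenerating these bindings with protoc-gen-go v1.35.1, which emits Reset()
// and ProtoReflect() without the protoimpl.UnsafeEnabled branches and drops the
// per-message Exporter functions from file_flow_flow_proto_init(). The exported
// surface of the flow package is unchanged, so callers compile as before.
// A minimal sketch, assuming the Flow message's NodeName field as defined in
// the Hubble flow proto:

package main

import (
	"fmt"
	"log"

	flowpb "github.com/cilium/cilium/api/v1/flow"
	"google.golang.org/protobuf/proto"
)

func main() {
	f := &flowpb.Flow{NodeName: "node-1"}
	data, err := proto.Marshal(f) // exercises the reflection path of the regenerated bindings
	if err != nil {
		log.Fatal(err)
	}
	f.Reset() // generated Reset() now stores message info unconditionally
	fmt.Printf("marshaled %d bytes; after Reset: %q\n", len(data), f.String())
}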
// versions: -// protoc-gen-go v1.34.2 -// protoc v5.28.2 +// protoc-gen-go v1.35.1 +// protoc v5.28.3 // source: flow/flow.proto package flow @@ -1475,11 +1475,9 @@ type Flow struct { func (x *Flow) Reset() { *x = Flow{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Flow) String() string { @@ -1490,7 +1488,7 @@ func (*Flow) ProtoMessage() {} func (x *Flow) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1799,11 +1797,9 @@ type FileInfo struct { func (x *FileInfo) Reset() { *x = FileInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileInfo) String() string { @@ -1814,7 +1810,7 @@ func (*FileInfo) ProtoMessage() {} func (x *FileInfo) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1860,11 +1856,9 @@ type Layer4 struct { func (x *Layer4) Reset() { *x = Layer4{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Layer4) String() string { @@ -1875,7 +1869,7 @@ func (*Layer4) ProtoMessage() {} func (x *Layer4) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1988,11 +1982,9 @@ type Layer7 struct { func (x *Layer7) Reset() { *x = Layer7{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Layer7) String() string { @@ -2003,7 +1995,7 @@ func (*Layer7) ProtoMessage() {} func (x *Layer7) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2096,11 +2088,9 @@ type TraceContext struct { func (x *TraceContext) Reset() { *x = TraceContext{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TraceContext) String() string { @@ -2111,7 +2101,7 @@ func (*TraceContext) ProtoMessage() {} func (x 
*TraceContext) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2146,11 +2136,9 @@ type TraceParent struct { func (x *TraceParent) Reset() { *x = TraceParent{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TraceParent) String() string { @@ -2161,7 +2149,7 @@ func (*TraceParent) ProtoMessage() {} func (x *TraceParent) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2200,11 +2188,9 @@ type Endpoint struct { func (x *Endpoint) Reset() { *x = Endpoint{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Endpoint) String() string { @@ -2215,7 +2201,7 @@ func (*Endpoint) ProtoMessage() {} func (x *Endpoint) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2290,11 +2276,9 @@ type Workload struct { func (x *Workload) Reset() { *x = Workload{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Workload) String() string { @@ -2305,7 +2289,7 @@ func (*Workload) ProtoMessage() {} func (x *Workload) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2346,11 +2330,9 @@ type TCP struct { func (x *TCP) Reset() { *x = TCP{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TCP) String() string { @@ -2361,7 +2343,7 @@ func (*TCP) ProtoMessage() {} func (x *TCP) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2415,11 +2397,9 @@ type IP struct { func (x *IP) Reset() { *x = IP{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *IP) String() string { @@ -2430,7 +2410,7 @@ func (*IP) ProtoMessage() {} func (x *IP) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2491,11 +2471,9 @@ type Ethernet struct { func (x *Ethernet) Reset() { *x = Ethernet{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Ethernet) String() string { @@ -2506,7 +2484,7 @@ func (*Ethernet) ProtoMessage() {} func (x *Ethernet) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2553,11 +2531,9 @@ type TCPFlags struct { func (x *TCPFlags) Reset() { *x = TCPFlags{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TCPFlags) String() string { @@ -2568,7 +2544,7 @@ func (*TCPFlags) ProtoMessage() {} func (x *TCPFlags) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2657,11 +2633,9 @@ type UDP struct { func (x *UDP) Reset() { *x = UDP{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UDP) String() string { @@ -2672,7 +2646,7 @@ func (*UDP) ProtoMessage() {} func (x *UDP) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2712,11 +2686,9 @@ type SCTP struct { func (x *SCTP) Reset() { *x = SCTP{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SCTP) String() string { @@ -2727,7 +2699,7 @@ func (*SCTP) ProtoMessage() {} func (x *SCTP) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2767,11 +2739,9 @@ type ICMPv4 struct { func (x *ICMPv4) Reset() { *x = ICMPv4{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_flow_flow_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ICMPv4) String() string { @@ -2782,7 +2752,7 @@ func (*ICMPv4) ProtoMessage() {} func (x *ICMPv4) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2822,11 +2792,9 @@ type ICMPv6 struct { func (x *ICMPv6) Reset() { *x = ICMPv6{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ICMPv6) String() string { @@ -2837,7 +2805,7 @@ func (*ICMPv6) ProtoMessage() {} func (x *ICMPv6) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2880,11 +2848,9 @@ type Policy struct { func (x *Policy) Reset() { *x = Policy{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Policy) String() string { @@ -2895,7 +2861,7 @@ func (*Policy) ProtoMessage() {} func (x *Policy) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2964,11 +2930,9 @@ type EventTypeFilter struct { func (x *EventTypeFilter) Reset() { *x = EventTypeFilter{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EventTypeFilter) String() string { @@ -2979,7 +2943,7 @@ func (*EventTypeFilter) ProtoMessage() {} func (x *EventTypeFilter) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3033,11 +2997,9 @@ type CiliumEventType struct { func (x *CiliumEventType) Reset() { *x = CiliumEventType{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CiliumEventType) String() string { @@ -3048,7 +3010,7 @@ func (*CiliumEventType) ProtoMessage() {} func (x *CiliumEventType) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3180,11 +3142,9 @@ type FlowFilter struct 
{ func (x *FlowFilter) Reset() { *x = FlowFilter{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FlowFilter) String() string { @@ -3195,7 +3155,7 @@ func (*FlowFilter) ProtoMessage() {} func (x *FlowFilter) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3502,11 +3462,9 @@ type DNS struct { func (x *DNS) Reset() { *x = DNS{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DNS) String() string { @@ -3517,7 +3475,7 @@ func (*DNS) ProtoMessage() {} func (x *DNS) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3599,11 +3557,9 @@ type HTTPHeader struct { func (x *HTTPHeader) Reset() { *x = HTTPHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HTTPHeader) String() string { @@ -3614,7 +3570,7 @@ func (*HTTPHeader) ProtoMessage() {} func (x *HTTPHeader) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3658,11 +3614,9 @@ type HTTP struct { func (x *HTTP) Reset() { *x = HTTP{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HTTP) String() string { @@ -3673,7 +3627,7 @@ func (*HTTP) ProtoMessage() {} func (x *HTTP) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3738,11 +3692,9 @@ type Kafka struct { func (x *Kafka) Reset() { *x = Kafka{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Kafka) String() string { @@ -3753,7 +3705,7 @@ func (*Kafka) ProtoMessage() {} func (x *Kafka) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3814,11 +3766,9 @@ type Service struct { func (x *Service) Reset() { *x = Service{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Service) String() string { @@ -3829,7 +3779,7 @@ func (*Service) ProtoMessage() {} func (x *Service) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3876,11 +3826,9 @@ type LostEvent struct { func (x *LostEvent) Reset() { *x = LostEvent{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LostEvent) String() string { @@ -3891,7 +3839,7 @@ func (*LostEvent) ProtoMessage() {} func (x *LostEvent) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3948,11 +3896,9 @@ type AgentEvent struct { func (x *AgentEvent) Reset() { *x = AgentEvent{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AgentEvent) String() string { @@ -3963,7 +3909,7 @@ func (*AgentEvent) ProtoMessage() {} func (x *AgentEvent) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4115,11 +4061,9 @@ type AgentEventUnknown struct { func (x *AgentEventUnknown) Reset() { *x = AgentEventUnknown{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AgentEventUnknown) String() string { @@ -4130,7 +4074,7 @@ func (*AgentEventUnknown) ProtoMessage() {} func (x *AgentEventUnknown) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4169,11 +4113,9 @@ type TimeNotification struct { func (x *TimeNotification) Reset() { *x = TimeNotification{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func 
(x *TimeNotification) String() string { @@ -4184,7 +4126,7 @@ func (*TimeNotification) ProtoMessage() {} func (x *TimeNotification) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4218,11 +4160,9 @@ type PolicyUpdateNotification struct { func (x *PolicyUpdateNotification) Reset() { *x = PolicyUpdateNotification{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PolicyUpdateNotification) String() string { @@ -4233,7 +4173,7 @@ func (*PolicyUpdateNotification) ProtoMessage() {} func (x *PolicyUpdateNotification) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4281,11 +4221,9 @@ type EndpointRegenNotification struct { func (x *EndpointRegenNotification) Reset() { *x = EndpointRegenNotification{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EndpointRegenNotification) String() string { @@ -4296,7 +4234,7 @@ func (*EndpointRegenNotification) ProtoMessage() {} func (x *EndpointRegenNotification) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4346,11 +4284,9 @@ type EndpointUpdateNotification struct { func (x *EndpointUpdateNotification) Reset() { *x = EndpointUpdateNotification{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EndpointUpdateNotification) String() string { @@ -4361,7 +4297,7 @@ func (*EndpointUpdateNotification) ProtoMessage() {} func (x *EndpointUpdateNotification) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4428,11 +4364,9 @@ type IPCacheNotification struct { func (x *IPCacheNotification) Reset() { *x = IPCacheNotification{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *IPCacheNotification) String() string { @@ -4443,7 +4377,7 @@ func (*IPCacheNotification) ProtoMessage() {} func (x *IPCacheNotification) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[32] 
- if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4525,11 +4459,9 @@ type ServiceUpsertNotificationAddr struct { func (x *ServiceUpsertNotificationAddr) Reset() { *x = ServiceUpsertNotificationAddr{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceUpsertNotificationAddr) String() string { @@ -4540,7 +4472,7 @@ func (*ServiceUpsertNotificationAddr) ProtoMessage() {} func (x *ServiceUpsertNotificationAddr) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4588,11 +4520,9 @@ type ServiceUpsertNotification struct { func (x *ServiceUpsertNotification) Reset() { *x = ServiceUpsertNotification{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceUpsertNotification) String() string { @@ -4603,7 +4533,7 @@ func (*ServiceUpsertNotification) ProtoMessage() {} func (x *ServiceUpsertNotification) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4692,11 +4622,9 @@ type ServiceDeleteNotification struct { func (x *ServiceDeleteNotification) Reset() { *x = ServiceDeleteNotification{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceDeleteNotification) String() string { @@ -4707,7 +4635,7 @@ func (*ServiceDeleteNotification) ProtoMessage() {} func (x *ServiceDeleteNotification) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4740,11 +4668,9 @@ type NetworkInterface struct { func (x *NetworkInterface) Reset() { *x = NetworkInterface{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NetworkInterface) String() string { @@ -4755,7 +4681,7 @@ func (*NetworkInterface) ProtoMessage() {} func (x *NetworkInterface) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4801,11 +4727,9 @@ type 
DebugEvent struct { func (x *DebugEvent) Reset() { *x = DebugEvent{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DebugEvent) String() string { @@ -4816,7 +4740,7 @@ func (*DebugEvent) ProtoMessage() {} func (x *DebugEvent) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4908,11 +4832,9 @@ type FlowFilter_Experimental struct { func (x *FlowFilter_Experimental) Reset() { *x = FlowFilter_Experimental{} - if protoimpl.UnsafeEnabled { - mi := &file_flow_flow_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_flow_flow_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FlowFilter_Experimental) String() string { @@ -4923,7 +4845,7 @@ func (*FlowFilter_Experimental) ProtoMessage() {} func (x *FlowFilter_Experimental) ProtoReflect() protoreflect.Message { mi := &file_flow_flow_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6017,476 +5939,6 @@ func file_flow_flow_proto_init() { if File_flow_flow_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_flow_flow_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Flow); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FileInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*Layer4); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*Layer7); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*TraceContext); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*TraceParent); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*Endpoint); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*Workload); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_flow_flow_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*TCP); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*IP); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*Ethernet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*TCPFlags); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*UDP); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*SCTP); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*ICMPv4); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*ICMPv6); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*Policy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*EventTypeFilter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*CiliumEventType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*FlowFilter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*DNS); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*HTTPHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*HTTP); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch 
v := v.(*Kafka); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*Service); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v := v.(*LostEvent); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*AgentEvent); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*AgentEventUnknown); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*TimeNotification); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*PolicyUpdateNotification); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*EndpointRegenNotification); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*EndpointUpdateNotification); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*IPCacheNotification); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[33].Exporter = func(v any, i int) any { - switch v := v.(*ServiceUpsertNotificationAddr); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[34].Exporter = func(v any, i int) any { - switch v := v.(*ServiceUpsertNotification); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[35].Exporter = func(v any, i int) any { - switch v := v.(*ServiceDeleteNotification); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[36].Exporter = func(v any, i int) any { - switch v := v.(*NetworkInterface); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_flow_flow_proto_msgTypes[37].Exporter = func(v any, i int) any { - switch v := v.(*DebugEvent); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - 
} - file_flow_flow_proto_msgTypes[38].Exporter = func(v any, i int) any { - switch v := v.(*FlowFilter_Experimental); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_flow_flow_proto_msgTypes[2].OneofWrappers = []any{ (*Layer4_TCP)(nil), (*Layer4_UDP)(nil), diff --git a/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go b/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go index 09acc7a93e..cdcf09f39c 100644 --- a/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go +++ b/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 -// protoc v5.28.2 +// protoc-gen-go v1.35.1 +// protoc v5.28.3 // source: observer/observer.proto package observer @@ -407,11 +407,9 @@ type ServerStatusRequest struct { func (x *ServerStatusRequest) Reset() { *x = ServerStatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerStatusRequest) String() string { @@ -422,7 +420,7 @@ func (*ServerStatusRequest) ProtoMessage() {} func (x *ServerStatusRequest) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -474,11 +472,9 @@ type ServerStatusResponse struct { func (x *ServerStatusResponse) Reset() { *x = ServerStatusResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerStatusResponse) String() string { @@ -489,7 +485,7 @@ func (*ServerStatusResponse) ProtoMessage() {} func (x *ServerStatusResponse) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -613,11 +609,9 @@ type GetFlowsRequest struct { func (x *GetFlowsRequest) Reset() { *x = GetFlowsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetFlowsRequest) String() string { @@ -628,7 +622,7 @@ func (*GetFlowsRequest) ProtoMessage() {} func (x *GetFlowsRequest) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -733,11 +727,9 @@ type GetFlowsResponse struct { func (x *GetFlowsResponse) Reset() { *x = GetFlowsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[3] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetFlowsResponse) String() string { @@ -748,7 +740,7 @@ func (*GetFlowsResponse) ProtoMessage() {} func (x *GetFlowsResponse) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -854,11 +846,9 @@ type GetAgentEventsRequest struct { func (x *GetAgentEventsRequest) Reset() { *x = GetAgentEventsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetAgentEventsRequest) String() string { @@ -869,7 +859,7 @@ func (*GetAgentEventsRequest) ProtoMessage() {} func (x *GetAgentEventsRequest) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -934,11 +924,9 @@ type GetAgentEventsResponse struct { func (x *GetAgentEventsResponse) Reset() { *x = GetAgentEventsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetAgentEventsResponse) String() string { @@ -949,7 +937,7 @@ func (*GetAgentEventsResponse) ProtoMessage() {} func (x *GetAgentEventsResponse) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1008,11 +996,9 @@ type GetDebugEventsRequest struct { func (x *GetDebugEventsRequest) Reset() { *x = GetDebugEventsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetDebugEventsRequest) String() string { @@ -1023,7 +1009,7 @@ func (*GetDebugEventsRequest) ProtoMessage() {} func (x *GetDebugEventsRequest) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1088,11 +1074,9 @@ type GetDebugEventsResponse struct { func (x *GetDebugEventsResponse) Reset() { *x = GetDebugEventsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *GetDebugEventsResponse) String() string { @@ -1103,7 +1087,7 @@ func (*GetDebugEventsResponse) ProtoMessage() {} func (x *GetDebugEventsResponse) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1147,11 +1131,9 @@ type GetNodesRequest struct { func (x *GetNodesRequest) Reset() { *x = GetNodesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetNodesRequest) String() string { @@ -1162,7 +1144,7 @@ func (*GetNodesRequest) ProtoMessage() {} func (x *GetNodesRequest) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1189,11 +1171,9 @@ type GetNodesResponse struct { func (x *GetNodesResponse) Reset() { *x = GetNodesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetNodesResponse) String() string { @@ -1204,7 +1184,7 @@ func (*GetNodesResponse) ProtoMessage() {} func (x *GetNodesResponse) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1254,11 +1234,9 @@ type Node struct { func (x *Node) Reset() { *x = Node{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Node) String() string { @@ -1269,7 +1247,7 @@ func (*Node) ProtoMessage() {} func (x *Node) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1362,11 +1340,9 @@ type TLS struct { func (x *TLS) Reset() { *x = TLS{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TLS) String() string { @@ -1377,7 +1353,7 @@ func (*TLS) ProtoMessage() {} func (x *TLS) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1414,11 
+1390,9 @@ type GetNamespacesRequest struct { func (x *GetNamespacesRequest) Reset() { *x = GetNamespacesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetNamespacesRequest) String() string { @@ -1429,7 +1403,7 @@ func (*GetNamespacesRequest) ProtoMessage() {} func (x *GetNamespacesRequest) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1456,11 +1430,9 @@ type GetNamespacesResponse struct { func (x *GetNamespacesResponse) Reset() { *x = GetNamespacesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetNamespacesResponse) String() string { @@ -1471,7 +1443,7 @@ func (*GetNamespacesResponse) ProtoMessage() {} func (x *GetNamespacesResponse) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1504,11 +1476,9 @@ type Namespace struct { func (x *Namespace) Reset() { *x = Namespace{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Namespace) String() string { @@ -1519,7 +1489,7 @@ func (*Namespace) ProtoMessage() {} func (x *Namespace) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1571,11 +1541,9 @@ type ExportEvent struct { func (x *ExportEvent) Reset() { *x = ExportEvent{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_observer_observer_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExportEvent) String() string { @@ -1586,7 +1554,7 @@ func (*ExportEvent) ProtoMessage() {} func (x *ExportEvent) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1717,11 +1685,9 @@ type GetFlowsRequest_Experimental struct { func (x *GetFlowsRequest_Experimental) Reset() { *x = GetFlowsRequest_Experimental{} - if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi 
:= &file_observer_observer_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetFlowsRequest_Experimental) String() string { @@ -1732,7 +1698,7 @@ func (*GetFlowsRequest_Experimental) ProtoMessage() {} func (x *GetFlowsRequest_Experimental) ProtoReflect() protoreflect.Message { mi := &file_observer_observer_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2089,212 +2055,6 @@ func file_observer_observer_proto_init() { if File_observer_observer_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_observer_observer_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*ServerStatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ServerStatusResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*GetFlowsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*GetFlowsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*GetAgentEventsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*GetAgentEventsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*GetDebugEventsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*GetDebugEventsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*GetNodesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*GetNodesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*Node); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := 
v.(*TLS); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*GetNamespacesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*GetNamespacesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*Namespace); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*ExportEvent); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_observer_observer_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*GetFlowsRequest_Experimental); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_observer_observer_proto_msgTypes[3].OneofWrappers = []any{ (*GetFlowsResponse_Flow)(nil), (*GetFlowsResponse_NodeStatus)(nil), diff --git a/vendor/github.com/cilium/cilium/api/v1/observer/observer_grpc.pb.go b/vendor/github.com/cilium/cilium/api/v1/observer/observer_grpc.pb.go index a6838e8843..1cda804916 100644 --- a/vendor/github.com/cilium/cilium/api/v1/observer/observer_grpc.pb.go +++ b/vendor/github.com/cilium/cilium/api/v1/observer/observer_grpc.pb.go @@ -4,7 +4,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.5.1 -// - protoc v5.28.2 +// - protoc v5.28.3 // source: observer/observer.proto package observer diff --git a/vendor/github.com/cilium/cilium/api/v1/relay/relay.pb.go b/vendor/github.com/cilium/cilium/api/v1/relay/relay.pb.go index 3bd1ec8491..cba6fcf319 100644 --- a/vendor/github.com/cilium/cilium/api/v1/relay/relay.pb.go +++ b/vendor/github.com/cilium/cilium/api/v1/relay/relay.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 -// protoc v5.28.2 +// protoc-gen-go v1.35.1 +// protoc v5.28.3 // source: relay/relay.proto package relay @@ -107,11 +107,9 @@ type NodeStatusEvent struct { func (x *NodeStatusEvent) Reset() { *x = NodeStatusEvent{} - if protoimpl.UnsafeEnabled { - mi := &file_relay_relay_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_relay_relay_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NodeStatusEvent) String() string { @@ -122,7 +120,7 @@ func (*NodeStatusEvent) ProtoMessage() {} func (x *NodeStatusEvent) ProtoReflect() protoreflect.Message { mi := &file_relay_relay_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -215,20 +213,6 @@ func file_relay_relay_proto_init() { if File_relay_relay_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_relay_relay_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*NodeStatusEvent); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/cilium/cilium/cilium-cli/cli/clustermesh.go b/vendor/github.com/cilium/cilium/cilium-cli/cli/clustermesh.go index 4677c1e938..adf3cf08a2 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/cli/clustermesh.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/cli/clustermesh.go @@ -337,4 +337,5 @@ func addCommonConnectFlags(cmd *cobra.Command, params *clustermesh.Parameters) { cmd.Flags().StringSliceVar(¶ms.DestinationContext, "destination-context", []string{}, "Comma separated list of Kubernetes configuration contexts of destination cluster") cmd.Flags().StringSliceVar(¶ms.DestinationEndpoints, "destination-endpoint", []string{}, "IP of ClusterMesh service of destination cluster") cmd.Flags().StringSliceVar(¶ms.SourceEndpoints, "source-endpoint", []string{}, "IP of ClusterMesh service of source cluster") + cmd.Flags().IntVar(¶ms.Parallel, "parallel", 1, "Number of parallel connection of destination cluster") } diff --git a/vendor/github.com/cilium/cilium/cilium-cli/cli/cmd.go b/vendor/github.com/cilium/cilium/cilium-cli/cli/cmd.go index 4159f3cd28..592c64de81 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/cli/cmd.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/cli/cmd.go @@ -18,6 +18,7 @@ var ( contextName string namespace string helmReleaseName string + kubeConfig string k8sClient *k8s.Client ) @@ -44,7 +45,7 @@ func NewCiliumCommand(hooks api.Hooks) *cobra.Command { } } - c, err := k8s.NewClient(contextName, "", namespace) + c, err := k8s.NewClient(contextName, kubeConfig, namespace) if err != nil { return fmt.Errorf("unable to create Kubernetes client: %w", err) } @@ -84,6 +85,7 @@ cilium connectivity test`, cmd.PersistentFlags().StringVar(&contextName, "context", "", "Kubernetes configuration context") cmd.PersistentFlags().StringVarP(&namespace, "namespace", "n", "kube-system", "Namespace Cilium is running in") cmd.PersistentFlags().StringVar(&helmReleaseName, "helm-release-name", "cilium", "Helm release name") + cmd.PersistentFlags().StringVar(&kubeConfig, "kubeconfig", "", "Path to the kubeconfig file") cmd.AddCommand( newCmdBgp(), diff --git 
a/vendor/github.com/cilium/cilium/cilium-cli/cli/connectivity.go b/vendor/github.com/cilium/cilium/cilium-cli/cli/connectivity.go index 9565b07bfc..a7214c9009 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/cli/connectivity.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/cli/connectivity.go @@ -152,6 +152,7 @@ func newCmdConnectivityTest(hooks api.Hooks) *cobra.Command { cmd.Flags().StringVar(¶ms.DNSTestServerImage, "dns-test-server-image", defaults.ConnectivityDNSTestServerImage, "Image path to use for CoreDNS") cmd.Flags().StringVar(¶ms.TestConnDisruptImage, "test-conn-disrupt-image", defaults.ConnectivityTestConnDisruptImage, "Image path to use for connection disruption tests") cmd.Flags().StringVar(¶ms.FRRImage, "frr-image", defaults.ConnectivityTestFRRImage, "Image path to use for FRR") + cmd.Flags().StringVar(¶ms.SocatImage, "socat-image", defaults.ConnectivityTestSocatImage, "Image path to use for multicast tests") cmd.Flags().UintVar(¶ms.Retry, "retry", defaults.ConnectRetry, "Number of retries on connection failure to external targets") cmd.Flags().DurationVar(¶ms.RetryDelay, "retry-delay", defaults.ConnectRetryDelay, "Delay between retries for external targets") diff --git a/vendor/github.com/cilium/cilium/cilium-cli/cli/hubble.go b/vendor/github.com/cilium/cilium/cilium-cli/cli/hubble.go index a22d835832..3d8d6333e2 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/cli/hubble.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/cli/hubble.go @@ -6,6 +6,7 @@ package cli import ( "context" "os" + "os/signal" "github.com/spf13/cobra" @@ -37,11 +38,11 @@ func newCmdPortForwardCommand() *cobra.Command { Use: "port-forward", Short: "Forward the relay port to the local machine", Long: ``, - RunE: func(_ *cobra.Command, _ []string) error { - params.Context = contextName - params.Namespace = namespace - ctx := context.Background() + RunE: func(cmd *cobra.Command, _ []string) error { + ctx, cancel := signal.NotifyContext(cmd.Context(), os.Interrupt, os.Kill) + defer cancel() + params.Namespace = namespace if err := params.RelayPortForwardCommand(ctx, k8sClient); err != nil { fatalf("Unable to port forward: %s", err) } @@ -49,7 +50,7 @@ func newCmdPortForwardCommand() *cobra.Command { }, } - cmd.Flags().IntVar(¶ms.PortForward, "port-forward", 4245, "Local port to forward to") + cmd.Flags().IntVar(¶ms.PortForward, "port-forward", 4245, "Local port to forward to. 0 will select a random port.") return cmd } @@ -62,18 +63,19 @@ func newCmdUI() *cobra.Command { cmd := &cobra.Command{ Use: "ui", Short: "Open the Hubble UI", - RunE: func(_ *cobra.Command, _ []string) error { - params.Context = contextName - params.Namespace = namespace + RunE: func(cmd *cobra.Command, _ []string) error { + ctx, cancel := signal.NotifyContext(cmd.Context(), os.Interrupt, os.Kill) + defer cancel() - if err := params.UIPortForwardCommand(); err != nil { + params.Namespace = namespace + if err := params.UIPortForwardCommand(ctx, k8sClient); err != nil { fatalf("Unable to port forward: %s", err) } return nil }, } - cmd.Flags().IntVar(¶ms.UIPortForward, "port-forward", 12000, "Local port to use for the port forward") + cmd.Flags().IntVar(¶ms.UIPortForward, "port-forward", 12000, "Local port to forward to. 
0 will select a random port.") cmd.Flags().BoolVar(¶ms.UIOpenBrowser, "open-browser", true, "When --open-browser=false is supplied, cilium Hubble UI will not open the browser") return cmd diff --git a/vendor/github.com/cilium/cilium/cilium-cli/cli/install.go b/vendor/github.com/cilium/cilium/cilium-cli/cli/install.go index 0c0f73c27e..3e53ab095b 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/cli/install.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/cli/install.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "os" + "time" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -17,7 +18,6 @@ import ( "github.com/cilium/cilium/cilium-cli/defaults" "github.com/cilium/cilium/cilium-cli/hubble" "github.com/cilium/cilium/cilium-cli/install" - "github.com/cilium/cilium/pkg/inctimer" ) // addCommonInstallFlags adds install command flags that are shared between install and upgrade commands. @@ -137,7 +137,7 @@ func newCmdUninstallWithHelm() *cobra.Command { break } select { - case <-inctimer.After(defaults.WaitRetryInterval): + case <-time.After(defaults.WaitRetryInterval): case <-ctx.Done(): fatalf("Timed out waiting for Hubble Pods to terminate") } diff --git a/vendor/github.com/cilium/cilium/cilium-cli/clustermesh/clustermesh.go b/vendor/github.com/cilium/cilium/cilium-cli/clustermesh/clustermesh.go index 2ff1798373..37a71be38f 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/clustermesh/clustermesh.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/clustermesh/clustermesh.go @@ -20,6 +20,7 @@ import ( "slices" "strconv" "strings" + "sync" "text/tabwriter" "time" @@ -37,6 +38,7 @@ import ( "github.com/cilium/cilium/cilium-cli/status" "github.com/cilium/cilium/cilium-cli/utils/wait" ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + "github.com/cilium/cilium/pkg/lock" ) const ( @@ -78,7 +80,7 @@ type k8sClusterMeshImplementation interface { CreateCiliumExternalWorkload(ctx context.Context, cew *ciliumv2.CiliumExternalWorkload, opts metav1.CreateOptions) (*ciliumv2.CiliumExternalWorkload, error) DeleteCiliumExternalWorkload(ctx context.Context, name string, opts metav1.DeleteOptions) error ListCiliumEndpoints(ctx context.Context, namespace string, options metav1.ListOptions) (*ciliumv2.CiliumEndpointList, error) - CiliumLogs(ctx context.Context, namespace, pod string, since time.Time) (string, error) + CiliumLogs(ctx context.Context, namespace, pod string, since time.Time, previous bool) (string, error) } type K8sClusterMesh struct { @@ -100,6 +102,7 @@ type Parameters struct { WaitDuration time.Duration DestinationEndpoints []string SourceEndpoints []string + Parallel int Writer io.Writer Labels map[string]string IPv4AllocCIDR string @@ -1823,13 +1826,35 @@ func (k *K8sClusterMesh) connectRemoteWithHelm(ctx context.Context, localCluster cn = append(state.remoteClusterNames, localClusterName) } + maxGoroutines := k.params.Parallel + + sem := make(chan struct{}, maxGoroutines) + var wg sync.WaitGroup + var mu lock.Mutex + var firstErr error + for aiClusterName, remoteClient := range rc { - err := k.connectSingleRemoteWithHelm(ctx, remoteClient, cn, helmValues[aiClusterName]) - if err != nil { - return err - } + wg.Add(1) + + sem <- struct{}{} + + go func(cn []string, rc *k8s.Client, helmVals map[string]interface{}) { + defer wg.Done() + defer func() { <-sem }() + + if err := k.connectSingleRemoteWithHelm(ctx, rc, cn, helmVals); err != nil { + mu.Lock() + if firstErr == nil { + firstErr = err + } + mu.Unlock() + } + }(cn, remoteClient, helmValues[aiClusterName]) } - return 
nil + + wg.Wait() + + return firstErr } func (k *K8sClusterMesh) connectSingleRemoteWithHelm(ctx context.Context, remoteClient *k8s.Client, clusterNames []string, helmValues map[string]interface{}) error { diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/builder.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/builder.go index ce55bfef20..685159ec05 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/builder.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/builder.go @@ -54,6 +54,9 @@ var ( //go:embed manifests/client-egress-to-cidr-external-deny.yaml clientEgressToCIDRExternalDenyPolicyYAML string + //go:embed manifests/client-egress-to-cidrgroup-external-deny.yaml + clientEgressToCIDRGroupExternalDenyPolicyYAML string + //go:embed manifests/client-egress-l7-http.yaml clientEgressL7HTTPPolicyYAML string @@ -210,6 +213,7 @@ func concurrentTests(connTests []*check.ConnectivityTest) error { clientWithServiceAccountEgressToEchoDeny{}, clientEgressToEchoServiceAccountDeny{}, clientEgressToCidrDeny{}, + clientEgressToCidrgroupDeny{}, clientEgressToCidrDenyDefault{}, clusterMeshEndpointSliceSync{}, health{}, @@ -250,6 +254,8 @@ func concurrentTests(connTests []*check.ConnectivityTest) error { localRedirectPolicyWithNodeDNS{}, noFragmentation{}, bgpControlPlane{}, + multicast{}, + strictModeEncryption{}, } return injectTests(tests, connTests...) } @@ -274,6 +280,7 @@ func renderTemplates(param check.Parameters) (map[string]string, error) { "clientEgressToCIDRExternalPolicyKNPYAML": clientEgressToCIDRExternalPolicyKNPYAML, "clientEgressToCIDRNodeKNPYAML": clientEgressToCIDRNodeKNPYAML, "clientEgressToCIDRExternalDenyPolicyYAML": clientEgressToCIDRExternalDenyPolicyYAML, + "clientEgressToCIDRGroupExternalDenyPolicyYAML": clientEgressToCIDRGroupExternalDenyPolicyYAML, "clientEgressL7HTTPPolicyYAML": clientEgressL7HTTPPolicyYAML, "clientEgressL7HTTPPolicyPortRangeYAML": clientEgressL7HTTPPolicyPortRangeYAML, "clientEgressL7HTTPNamedPortPolicyYAML": clientEgressL7HTTPNamedPortPolicyYAML, diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/client_egress_to_cidrgroup_deny.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/client_egress_to_cidrgroup_deny.go new file mode 100644 index 0000000000..bd89f2b5c7 --- /dev/null +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/client_egress_to_cidrgroup_deny.go @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package builder + +import ( + "github.com/cilium/cilium/cilium-cli/connectivity/check" + "github.com/cilium/cilium/cilium-cli/connectivity/tests" + "github.com/cilium/cilium/cilium-cli/utils/features" +) + +type clientEgressToCidrgroupDeny struct{} + +func (t clientEgressToCidrgroupDeny) build(ct *check.ConnectivityTest, templates map[string]string) { + // This policy denies L3 traffic to ExternalCIDR except ExternalIP/32 + // It does so using a CiliumCIDRGroup + newTest("client-egress-to-cidrgroup-deny", ct). + WithCiliumPolicy(allowAllEgressPolicyYAML). // Allow all egress traffic + WithCiliumPolicy(templates["clientEgressToCIDRGroupExternalDenyPolicyYAML"]). + WithScenarios( + tests.PodToCIDR(tests.WithRetryDestIP(ct.Params().ExternalIP)), // Denies all traffic to ExternalOtherIP, but allow ExternalIP + ). 
+ WithExpectations(func(a *check.Action) (egress, ingress check.Result) { + if a.Destination().Address(features.GetIPFamily(ct.Params().ExternalOtherIP)) == ct.Params().ExternalOtherIP { + return check.ResultPolicyDenyEgressDrop, check.ResultNone + } + if a.Destination().Address(features.GetIPFamily(ct.Params().ExternalIP)) == ct.Params().ExternalIP { + return check.ResultOK, check.ResultNone + } + return check.ResultDrop, check.ResultDrop + }) +} diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/egress_gateway.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/egress_gateway.go index d505dea9f6..3925cd4c58 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/egress_gateway.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/egress_gateway.go @@ -12,7 +12,8 @@ import ( type egressGateway struct{} func (t egressGateway) build(ct *check.ConnectivityTest, _ map[string]string) { - newTest("egress-gateway", ct). + // Prefix the test name with `seq-` to run it sequentially. + newTest("seq-egress-gateway", ct). WithCondition(func() bool { return ct.Params().IncludeUnsafeTests }). WithCiliumEgressGatewayPolicy(check.CiliumEgressGatewayPolicyParams{ Name: "cegp-sample-client", diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/egress_gateway_with_l7_policy.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/egress_gateway_with_l7_policy.go index 8a63e048b1..79433fb31b 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/egress_gateway_with_l7_policy.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/egress_gateway_with_l7_policy.go @@ -21,7 +21,8 @@ var clientEgressL7HTTPExternalYAML string type egressGatewayWithL7Policy struct{} func (t egressGatewayWithL7Policy) build(ct *check.ConnectivityTest, templates map[string]string) { - newTest("egress-gateway-with-l7-policy", ct). + // Prefix the test name with `seq-` to run it sequentially. + newTest("seq-egress-gateway-with-l7-policy", ct). WithCondition(func() bool { return versioncheck.MustCompile(">=1.16.0")(ct.CiliumVersion) && ct.Params().IncludeUnsafeTests }). diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/from_cidr_host_netns.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/from_cidr_host_netns.go index f865469f6d..429e37d3e1 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/from_cidr_host_netns.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/from_cidr_host_netns.go @@ -12,7 +12,8 @@ import ( type fromCidrHostNetns struct{} func (t fromCidrHostNetns) build(ct *check.ConnectivityTest, templates map[string]string) { - newTest("from-cidr-host-netns", ct). + // Prefix the test name with `seq-` to run it sequentially. + newTest("seq-from-cidr-host-netns", ct). WithCondition(func() bool { return ct.Params().IncludeUnsafeTests }). WithFeatureRequirements(features.RequireEnabled(features.NodeWithoutCilium)). WithCiliumPolicy(templates["echoIngressFromCIDRYAML"]). 
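The hunks above add the client-egress-to-cidrgroup-deny test and rename the egress-gateway, egress-gateway-with-l7-policy, and from-cidr-host-netns tests with a `seq-` prefix, which the connectivity framework treats as a request to run them sequentially rather than inside a concurrent batch. A minimal sketch of such prefix-based partitioning is shown below; it is illustrative only and not the cilium-cli scheduler, and the helper name splitBySeqPrefix as well as the hard-coded test names are assumptions made for the example.

package main

import (
	"fmt"
	"strings"
)

// splitBySeqPrefix partitions test names into a batch that can run
// concurrently and a batch that must run one at a time, based on the
// "seq-" naming convention referenced in the hunks above.
// Illustrative helper only; not part of cilium-cli.
func splitBySeqPrefix(names []string) (concurrent, sequential []string) {
	for _, name := range names {
		if strings.HasPrefix(name, "seq-") {
			sequential = append(sequential, name)
			continue
		}
		concurrent = append(concurrent, name)
	}
	return concurrent, sequential
}

func main() {
	names := []string{
		"client-egress-to-cidrgroup-deny",
		"seq-egress-gateway",
		"seq-from-cidr-host-netns",
	}
	concurrent, sequential := splitBySeqPrefix(names)
	fmt.Println("concurrent:", concurrent)
	fmt.Println("sequential:", sequential)
}

Encoding the convention in the test name itself means a test can move between the concurrent and sequential groups by a rename alone, without any extra registration step.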
diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/manifests/client-egress-to-cidrgroup-external-deny.yaml b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/manifests/client-egress-to-cidrgroup-external-deny.yaml new file mode 100644 index 0000000000..361589271a --- /dev/null +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/manifests/client-egress-to-cidrgroup-external-deny.yaml @@ -0,0 +1,27 @@ +# This policy denies packets towards {{.ExternalOtherIP}}, but not {{.ExternalIP}} +# Please note that if there is no other allowed rule, the policy +# will be automatically denied {{.ExternalIP}} as well. + +apiVersion: "cilium.io/v2alpha1" +kind: CiliumCIDRGroup +metadata: + name: cilium-test-external-cidr +spec: + externalCIDRs: + - "{{.ExternalCIDR}}" + +--- + +apiVersion: "cilium.io/v2" +kind: CiliumNetworkPolicy +metadata: + name: client-egress-to-cidrgroup-deny +spec: + endpointSelector: + matchLabels: + kind: client + egressDeny: + - toCIDRSet: + - cidrGroupRef: cilium-test-external-cidr + except: + - "{{.ExternalIP}}/32" diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/multicast.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/multicast.go new file mode 100644 index 0000000000..6753a3ccf3 --- /dev/null +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/multicast.go @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package builder + +import ( + "github.com/cilium/cilium/cilium-cli/connectivity/check" + "github.com/cilium/cilium/cilium-cli/connectivity/tests" + "github.com/cilium/cilium/cilium-cli/utils/features" + + "github.com/cilium/cilium/pkg/versioncheck" +) + +type multicast struct{} + +func (t multicast) build(ct *check.ConnectivityTest, _ map[string]string) { + newTest("multicast", ct). + WithCondition(func() bool { + return versioncheck.MustCompile(">=1.16.0")(ct.CiliumVersion) + }). + WithCondition(func() bool { + return ct.Params().IncludeUnsafeTests + }). + WithFeatureRequirements( + features.RequireEnabled(features.Multicast), + ). + WithScenarios(tests.SocatMulticast()) +} diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/pod_to_pod_encryption.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/pod_to_pod_encryption.go index 2231a12f13..33fa5fedaa 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/pod_to_pod_encryption.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/pod_to_pod_encryption.go @@ -29,13 +29,11 @@ func (t podToPodEncryption) build(ct *check.ConnectivityTest, _ map[string]strin WithCondition(func() bool { return !ct.Params().SingleNode }). WithFeatureRequirements( features.RequireEnabled(features.L7Proxy), - // Once https://github.com/cilium/cilium/issues/33168 is fixed, we - // can enable for IPsec too. - features.RequireMode(features.EncryptionPod, "wireguard"), + features.RequireEnabled(features.EncryptionPod), ). WithCiliumPolicy(clientsEgressL7HTTPFromAnyPolicyYAML). WithCiliumPolicy(echoIngressL7HTTPFromAnywherePolicyYAML). 
WithScenarios( - tests.PodToPodEncryption(features.RequireEnabled(features.EncryptionPod)), + tests.PodToPodEncryption(), ) } diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/strict_mode_encryption.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/strict_mode_encryption.go new file mode 100644 index 0000000000..4a06db3b3e --- /dev/null +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/builder/strict_mode_encryption.go @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package builder + +import ( + "github.com/cilium/cilium/cilium-cli/connectivity/check" + "github.com/cilium/cilium/cilium-cli/connectivity/tests" + "github.com/cilium/cilium/cilium-cli/utils/features" +) + +type strictModeEncryption struct{} + +func (t strictModeEncryption) build(ct *check.ConnectivityTest, _ map[string]string) { + newTest("strict-mode-encryption", ct). + WithCondition(func() bool { return ct.Params().IncludeUnsafeTests }). + // Until https://github.com/cilium/cilium/pull/35454 is backported to <1.17.0 + WithCiliumVersion(">=1.17.0"). + WithFeatureRequirements( + features.RequireEnabled(features.EncryptionStrictMode), + // Strict mode is only supported with WireGuard + features.RequireMode(features.EncryptionPod, "wireguard"), + // Strict mode always allows host-to-host tunnel traffic + features.RequireDisabled(features.Tunnel), + ). + WithScenarios(tests.PodToPodMissingIPCache()). + WithExpectations(func(_ *check.Action) (egress, ingress check.Result) { + return check.ResultEgressUnencryptedDrop, check.ResultEgressUnencryptedDrop + }) +} diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/action.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/action.go index c1da052b74..ef12bce90d 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/action.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/action.go @@ -28,7 +28,6 @@ import ( "github.com/cilium/cilium/cilium-cli/defaults" "github.com/cilium/cilium/cilium-cli/utils/features" hubprinter "github.com/cilium/cilium/hubble/pkg/printer" - "github.com/cilium/cilium/pkg/inctimer" "github.com/cilium/cilium/pkg/lock" ) @@ -802,7 +801,7 @@ func (a *Action) waitForRelay(ctx context.Context, client observer.ObserverClien select { case <-ctx.Done(): return fmt.Errorf("hubble server status failure: %w", ctx.Err()) - case <-inctimer.After(time.Second): + case <-time.After(time.Second): a.Debug("retrying hubble relay server status request") } } diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/check.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/check.go index 28de1d9d4d..8ad92d0b03 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/check.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/check.go @@ -63,6 +63,7 @@ type Parameters struct { JSONMockImage string TestConnDisruptImage string FRRImage string + SocatImage string AgentDaemonSetName string DNSTestServerImage string IncludeUnsafeTests bool diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/context.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/context.go index 35e3d0de7f..c57892d1f3 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/context.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/context.go @@ -31,6 +31,10 @@ import ( "github.com/cilium/cilium/pkg/lock" 
) +const ( + socatMulticastTestMsg = "Multicast test message" +) + // ConnectivityTest is the root context of the connectivity test suite // and holds all resources belonging to it. It implements interface // ConnectivityTest and is instantiated once at the start of the program, @@ -71,6 +75,8 @@ type ConnectivityTest struct { lrpClientPods map[string]Pod lrpBackendPods map[string]Pod frrPods []Pod + socatServerPods []Pod + socatClientPods []Pod hostNetNSPodsByNode map[string]Pod secondaryNetworkNodeIPv4 map[string]string // node name => secondary ip @@ -211,6 +217,8 @@ func NewConnectivityTest( clientCPPods: make(map[string]Pod), lrpClientPods: make(map[string]Pod), lrpBackendPods: make(map[string]Pod), + socatServerPods: []Pod{}, + socatClientPods: []Pod{}, perfClientPods: []Pod{}, perfServerPod: []Pod{}, PerfResults: []common.PerfSummary{}, @@ -1076,6 +1084,14 @@ func (ct *ConnectivityTest) PerfClientPods() []Pod { return ct.perfClientPods } +func (ct *ConnectivityTest) SocatServerPods() []Pod { + return ct.socatServerPods +} + +func (ct *ConnectivityTest) SocatClientPods() []Pod { + return ct.socatClientPods +} + func (ct *ConnectivityTest) EchoPods() map[string]Pod { return ct.echoPods } @@ -1210,3 +1226,26 @@ func (ct *ConnectivityTest) EchoServicePrefixes(ipFamily features.IPFamily) []ne } return res } + +// Multicast packet receiver (socat server) +// This command exits with exit code 0 +// WITHOUT waiting for a second after receiving a packet. +func (ct *ConnectivityTest) SocatServer1secCommand(peer TestPeer, port int, group string) []string { + addr := peer.Address(features.IPFamilyV4) + cmdStr := fmt.Sprintf("timeout 5 socat STDIO UDP4-RECVFROM:%d,ip-add-membership=%s:%s", port, group, addr) + cmd := strings.Fields(cmdStr) + return cmd +} + +// Multicast packet sender (socat client) +func (ct *ConnectivityTest) SocatClientCommand(port int, group string) []string { + portStr := fmt.Sprintf("%d", port) + cmdStr := fmt.Sprintf(`for i in $(seq 1 10000); do echo "%s" | socat - UDP-DATAGRAM:%s:%s; sleep 0.1; done`, socatMulticastTestMsg, group, portStr) + cmd := []string{"/bin/sh", "-c", cmdStr} + return cmd +} + +func (ct *ConnectivityTest) KillMulticastTestSender() []string { + cmd := []string{"pkill", "-f", socatMulticastTestMsg} + return cmd +} diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/deployment.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/deployment.go index 8ada6a1ed9..70a1828f71 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/deployment.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/deployment.go @@ -395,6 +395,30 @@ func (ct *ConnectivityTest) ingresses() map[string]string { return ingresses } +// maybeNodeToNodeEncryptionAffinity returns a node affinity term to prefer nodes +// that are not part of the control plane when node to node encryption is enabled, +// because they are excluded by default from node to node encryption. This logic +// is currently suboptimal as it only accounts for the default selector, for the +// sake of simplicity, but it should cover all common use cases. 
+func (ct *ConnectivityTest) maybeNodeToNodeEncryptionAffinity() *corev1.NodeAffinity { + encryptNode, _ := ct.Feature(features.EncryptionNode) + if !encryptNode.Enabled || encryptNode.Mode == "" { + return nil + } + + return &corev1.NodeAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{{ + Weight: 100, + Preference: corev1.NodeSelectorTerm{ + MatchExpressions: []corev1.NodeSelectorRequirement{{ + Key: "node-role.kubernetes.io/control-plane", + Operator: corev1.NodeSelectorOpDoesNotExist, + }}, + }, + }}, + } +} + // deploy ensures the test Namespace, Services and Deployments are running on the cluster. func (ct *ConnectivityTest) deploy(ctx context.Context) error { var err error @@ -633,6 +657,7 @@ func (ct *ConnectivityTest) deploy(ctx context.Context) error { }, }, }, + NodeAffinity: ct.maybeNodeToNodeEncryptionAffinity(), }, ReadinessProbe: newLocalReadinessProbe(containerPort, "/"), }, ct.params.DNSTestServerImage) @@ -655,6 +680,7 @@ func (ct *ConnectivityTest) deploy(ctx context.Context) error { Image: ct.params.CurlImage, Command: []string{"/usr/bin/pause"}, Annotations: ct.params.DeploymentAnnotations.Match(clientDeploymentName), + Affinity: &corev1.Affinity{NodeAffinity: ct.maybeNodeToNodeEncryptionAffinity()}, NodeSelector: ct.params.NodeSelector, }) _, err = ct.clients.src.CreateServiceAccount(ctx, ct.params.TestNamespace, k8s.NewServiceAccount(clientDeploymentName), metav1.CreateOptions{}) @@ -691,6 +717,7 @@ func (ct *ConnectivityTest) deploy(ctx context.Context) error { }, }, }, + NodeAffinity: ct.maybeNodeToNodeEncryptionAffinity(), }, NodeSelector: ct.params.NodeSelector, }) @@ -729,6 +756,7 @@ func (ct *ConnectivityTest) deploy(ctx context.Context) error { }, }, }, + NodeAffinity: ct.maybeNodeToNodeEncryptionAffinity(), }, NodeSelector: ct.params.NodeSelector, }) @@ -832,6 +860,7 @@ func (ct *ConnectivityTest) deploy(ctx context.Context) error { }, }, }, + NodeAffinity: ct.maybeNodeToNodeEncryptionAffinity(), }, NodeSelector: ct.params.NodeSelector, ReadinessProbe: newLocalReadinessProbe(containerPort, "/"), @@ -1042,6 +1071,34 @@ func (ct *ConnectivityTest) deploy(ctx context.Context) error { } } + if ct.Features[features.Multicast].Enabled { + _, err = ct.clients.src.GetDeployment(ctx, ct.params.TestNamespace, socatClientDeploymentName, metav1.GetOptions{}) + if err != nil { + ct.Logf("✨ [%s] Deploying %s deployment...", ct.clients.src.ClusterName(), socatClientDeploymentName) + ds := NewSocatClientDeployment(ct.params) + _, err = ct.clients.src.CreateServiceAccount(ctx, ct.params.TestNamespace, k8s.NewServiceAccount(socatClientDeploymentName), metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("unable to create service account %s: %w", socatClientDeploymentName, err) + } + _, err = ct.clients.src.CreateDeployment(ctx, ct.params.TestNamespace, ds, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("unable to create deployment %s: %w", socatClientDeploymentName, err) + } + } + } + + if ct.Features[features.Multicast].Enabled { + _, err = ct.clients.src.GetDaemonSet(ctx, ct.params.TestNamespace, socatServerDaemonsetName, metav1.GetOptions{}) + if err != nil { + ct.Logf("✨ [%s] Deploying %s daemonset...", ct.clients.src.ClusterName(), socatServerDaemonsetName) + ds := NewSocatServerDaemonSet(ct.params) + _, err = ct.clients.src.CreateDaemonSet(ctx, ct.params.TestNamespace, ds, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("unable to create daemonset %s: %w", socatServerDaemonsetName, err) + } 
+ } + } + return nil } @@ -1202,6 +1259,10 @@ func (ct *ConnectivityTest) deploymentList() (srcList []string, dstList []string srcList = append(srcList, lrpBackendDeploymentName) } + if ct.Features[features.Multicast].Enabled { + srcList = append(srcList, socatClientDeploymentName) + } + return srcList, dstList } @@ -1212,6 +1273,8 @@ func (ct *ConnectivityTest) deleteDeployments(ctx context.Context, client *k8s.C _ = client.DeleteDeployment(ctx, ct.params.TestNamespace, clientDeploymentName, metav1.DeleteOptions{}) _ = client.DeleteDeployment(ctx, ct.params.TestNamespace, client2DeploymentName, metav1.DeleteOptions{}) _ = client.DeleteDeployment(ctx, ct.params.TestNamespace, client3DeploymentName, metav1.DeleteOptions{}) + _ = client.DeleteDeployment(ctx, ct.params.TestNamespace, socatClientDeploymentName, metav1.DeleteOptions{}) + _ = client.DeleteDeployment(ctx, ct.params.TestNamespace, socatServerDaemonsetName, metav1.DeleteOptions{}) // NOTE: the socat server is a DaemonSet, not a Deployment; it likely needs to be removed through the DaemonSet API instead. _ = client.DeleteServiceAccount(ctx, ct.params.TestNamespace, echoSameNodeDeploymentName, metav1.DeleteOptions{}) _ = client.DeleteServiceAccount(ctx, ct.params.TestNamespace, echoOtherNodeDeploymentName, metav1.DeleteOptions{}) _ = client.DeleteServiceAccount(ctx, ct.params.TestNamespace, clientDeploymentName, metav1.DeleteOptions{}) @@ -1419,6 +1482,35 @@ func (ct *ConnectivityTest) validateDeployment(ctx context.Context) error { } } + if ct.Features[features.Multicast].Enabled { + // socat client pods + socatClientPods, err := ct.clients.src.ListPods(ctx, ct.params.TestNamespace, metav1.ListOptions{LabelSelector: "name=" + socatClientDeploymentName}) + if err != nil { + return fmt.Errorf("unable to list socat client pods: %w", err) + } + for _, pod := range socatClientPods.Items { + ct.socatClientPods = append(ct.socatClientPods, Pod{ + K8sClient: ct.client, + Pod: pod.DeepCopy(), + }) + } + + // socat server pods + if err := WaitForDaemonSet(ctx, ct, ct.clients.src, ct.Params().TestNamespace, socatServerDaemonsetName); err != nil { + return err + } + socatServerPods, err := ct.clients.src.ListPods(ctx, ct.params.TestNamespace, metav1.ListOptions{LabelSelector: "name=" + socatServerDaemonsetName}) + if err != nil { + return fmt.Errorf("unable to list socat server pods: %w", err) + } + for _, pod := range socatServerPods.Items { + ct.socatServerPods = append(ct.socatServerPods, Pod{ + K8sClient: ct.client, + Pod: pod.DeepCopy(), + }) + } + } + for _, cp := range ct.clientPods { if err := WaitForCoreDNS(ctx, ct, cp); err != nil { return err diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/features.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/features.go index 30063d0c2e..a5ed68c62c 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/features.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/features.go @@ -66,6 +66,7 @@ func (ct *ConnectivityTest) extractFeaturesFromRuntimeConfig(ctx context.Context result[features.EncryptionNode] = features.Status{ Enabled: cfg.EncryptNode, + Mode: cfg.NodeEncryptionOptOutLabelsString, } isFeatureKNPEnabled, err := ct.isFeatureKNPEnabled(cfg.EnableK8sNetworkPolicy) diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/metrics.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/metrics.go index 6ef5089681..89d677ba3f 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/metrics.go +++ 
b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/metrics.go @@ -12,7 +12,7 @@ import ( dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" - "github.com/cilium/cilium/cilium-cli/k8s" + "github.com/cilium/cilium/pkg/k8s" ) // metricsURLFormat is the path format to retrieve the metrics on the diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/netshoot.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/netshoot.go new file mode 100644 index 0000000000..60360b7e1b --- /dev/null +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/netshoot.go @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package check + +import appsv1 "k8s.io/api/apps/v1" + +const ( + // Names of the socat server DaemonSet and socat client Deployment used by the multicast tests. + socatServerDaemonsetName = "socat-server-daemonset" + socatClientDeploymentName = "socat-client" +) + +func NewSocatServerDaemonSet(params Parameters) *appsv1.DaemonSet { + ds := newDaemonSet(daemonSetParameters{ + Name: socatServerDaemonsetName, + Kind: socatServerDaemonsetName, + Image: params.SocatImage, + Command: []string{"/bin/sh", "-c", "sleep 10000000"}, + }) + return ds +} + +func NewSocatClientDeployment(params Parameters) *appsv1.Deployment { + dep := newDeployment(deploymentParameters{ + Name: socatClientDeploymentName, + Kind: socatClientDeploymentName, + Image: params.SocatImage, + Replicas: 1, + Command: []string{"/bin/sh", "-c", "sleep 10000000"}, + }) + return dep +} diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/policy.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/policy.go index f8d9ae1483..c78ee971a5 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/policy.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/policy.go @@ -5,9 +5,8 @@ package check import ( "context" + "encoding/json" "fmt" - "maps" - "reflect" "strconv" "strings" "time" @@ -15,16 +14,20 @@ import ( networkingv1 "k8s.io/api/networking/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" - clientsetscheme "k8s.io/client-go/kubernetes/scheme" flowpb "github.com/cilium/cilium/api/v1/flow" "github.com/cilium/cilium/cilium-cli/defaults" "github.com/cilium/cilium/cilium-cli/k8s" + "github.com/cilium/cilium/cilium-cli/utils/features" + k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io" ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/policy/api" ) /* How many times we should retry getting the policy revisions before @@ -106,6 +109,25 @@ type client[T policy] interface { Update(ctx context.Context, networkPolicy T, opts metav1.UpdateOptions) (T, error) } +// createOrUpdate applies a generic object to the cluster, returning true if it was created or updated +func createOrUpdate(ctx context.Context, client *k8s.Client, obj k8s.Object) (bool, error) { + existing, err := client.GetGeneric(ctx, obj.GetNamespace(), obj.GetName(), obj) + if err != nil && !k8serrors.IsNotFound(err) { + return false, fmt.Errorf("failed to retrieve %s/%s: %w", obj.GetNamespace(), obj.GetName(), err) + } + + created, err := 
client.ApplyGeneric(ctx, obj) + if err != nil { + return false, fmt.Errorf("failed to create / update %s %s/%s: %w", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetNamespace(), obj.GetName(), err) + } + + if existing == nil { + return true, nil + } + + return existing.GetGeneration() != created.GetGeneration(), nil +} + // CreateOrUpdatePolicy implements the generic logic to create or update a policy. func CreateOrUpdatePolicy[T policy](ctx context.Context, client client[T], obj T, mutator func(obj T) bool) (bool, error) { // Let's attempt to create the policy. We optimize the creation path @@ -141,137 +163,6 @@ func CreateOrUpdatePolicy[T policy](ctx context.Context, client client[T], obj T return true, nil } -// createOrUpdateCNP creates the CNP and updates it if it already exists. -func createOrUpdateCNP(ctx context.Context, client *k8s.Client, cnp *ciliumv2.CiliumNetworkPolicy) (bool, error) { - return CreateOrUpdatePolicy(ctx, client.CiliumClientset.CiliumV2().CiliumNetworkPolicies(cnp.GetNamespace()), - cnp, func(current *ciliumv2.CiliumNetworkPolicy) bool { - if maps.Equal(current.GetLabels(), cnp.GetLabels()) && - current.Spec.DeepEqual(cnp.Spec) && - current.Specs.DeepEqual(&cnp.Specs) { - return false - } - - current.ObjectMeta.Labels = cnp.ObjectMeta.Labels - current.Spec = cnp.Spec - current.Specs = cnp.Specs - return true - }, - ) -} - -// createOrUpdateCCNP creates the CCNP and updates it if it already exists. -func createOrUpdateCCNP(ctx context.Context, client *k8s.Client, ccnp *ciliumv2.CiliumClusterwideNetworkPolicy) (bool, error) { - return CreateOrUpdatePolicy(ctx, client.CiliumClientset.CiliumV2().CiliumClusterwideNetworkPolicies(), - ccnp, func(current *ciliumv2.CiliumClusterwideNetworkPolicy) bool { - if maps.Equal(current.GetLabels(), ccnp.GetLabels()) && - current.Spec.DeepEqual(ccnp.Spec) && - current.Specs.DeepEqual(&ccnp.Specs) { - return false - } - - current.ObjectMeta.Labels = ccnp.ObjectMeta.Labels - current.Spec = ccnp.Spec - current.Specs = ccnp.Specs - return true - }, - ) -} - -// createOrUpdateKNP creates the KNP and updates it if it already exists. -func createOrUpdateKNP(ctx context.Context, client *k8s.Client, knp *networkingv1.NetworkPolicy) (bool, error) { - return CreateOrUpdatePolicy(ctx, client.Clientset.NetworkingV1().NetworkPolicies(knp.GetNamespace()), - knp, func(current *networkingv1.NetworkPolicy) bool { - if maps.Equal(current.GetLabels(), knp.GetLabels()) && - reflect.DeepEqual(current.Spec, knp.Spec) { - return false - } - - current.ObjectMeta.Labels = knp.ObjectMeta.Labels - current.Spec = knp.Spec - return true - }, - ) -} - -// createOrUpdateCEGP creates the CEGP and updates it if it already exists. -func createOrUpdateCEGP(ctx context.Context, client *k8s.Client, cegp *ciliumv2.CiliumEgressGatewayPolicy) error { - _, err := CreateOrUpdatePolicy(ctx, client.CiliumClientset.CiliumV2().CiliumEgressGatewayPolicies(), - cegp, func(current *ciliumv2.CiliumEgressGatewayPolicy) bool { - if maps.Equal(current.GetLabels(), cegp.GetLabels()) && - current.Spec.DeepEqual(&cegp.Spec) { - return false - } - - current.ObjectMeta.Labels = cegp.ObjectMeta.Labels - current.Spec = cegp.Spec - return true - }, - ) - return err -} - -// createOrUpdateCLRP creates the CLRP and updates it if it already exists. 
-func createOrUpdateCLRP(ctx context.Context, client *k8s.Client, clrp *ciliumv2.CiliumLocalRedirectPolicy) error { - _, err := CreateOrUpdatePolicy(ctx, client.CiliumClientset.CiliumV2().CiliumLocalRedirectPolicies(clrp.Namespace), - clrp, func(current *ciliumv2.CiliumLocalRedirectPolicy) bool { - if maps.Equal(current.GetLabels(), clrp.GetLabels()) && - current.Spec.DeepEqual(&clrp.Spec) { - return false - } - - current.ObjectMeta.Labels = clrp.ObjectMeta.Labels - current.Spec = clrp.Spec - return true - }, - ) - return err -} - -// deleteCNP deletes a CiliumNetworkPolicy from the cluster. -func deleteCNP(ctx context.Context, client *k8s.Client, cnp *ciliumv2.CiliumNetworkPolicy) error { - if err := client.DeleteCiliumNetworkPolicy(ctx, cnp.Namespace, cnp.Name, metav1.DeleteOptions{}); err != nil { - return fmt.Errorf("%s/%s/%s policy delete failed: %w", client.ClusterName(), cnp.Namespace, cnp.Name, err) - } - - return nil -} - -// deleteCNP deletes a CiliumNetworkPolicy from the cluster. -func deleteCCNP(ctx context.Context, client *k8s.Client, ccnp *ciliumv2.CiliumClusterwideNetworkPolicy) error { - if err := client.DeleteCiliumClusterwideNetworkPolicy(ctx, ccnp.Name, metav1.DeleteOptions{}); err != nil { - return fmt.Errorf("%s/%s policy delete failed: %w", client.ClusterName(), ccnp.Name, err) - } - - return nil -} - -// deleteKNP deletes a Kubernetes NetworkPolicy from the cluster. -func deleteKNP(ctx context.Context, client *k8s.Client, knp *networkingv1.NetworkPolicy) error { - if err := client.DeleteKubernetesNetworkPolicy(ctx, knp.Namespace, knp.Name, metav1.DeleteOptions{}); err != nil { - return fmt.Errorf("%s/%s/%s policy delete failed: %w", client.ClusterName(), knp.Namespace, knp.Name, err) - } - - return nil -} - -// deleteCEGP deletes a CiliumEgressGatewayPolicy from the cluster. -func deleteCEGP(ctx context.Context, client *k8s.Client, cegp *ciliumv2.CiliumEgressGatewayPolicy) error { - if err := client.DeleteCiliumEgressGatewayPolicy(ctx, cegp.Name, metav1.DeleteOptions{}); err != nil { - return fmt.Errorf("%s/%s policy delete failed: %w", client.ClusterName(), cegp.Name, err) - } - - return nil -} - -// deleteCLRP deletes a CiliumLocalRedirectPolicy from the cluster. -func deleteCLRP(ctx context.Context, client *k8s.Client, clrp *ciliumv2.CiliumLocalRedirectPolicy) error { - if err := client.DeleteCiliumLocalRedirectPolicy(ctx, clrp.Namespace, clrp.Name, metav1.DeleteOptions{}); err != nil { - return fmt.Errorf("%s/%s/%s policy delete failed: %w", client.ClusterName(), clrp.Namespace, clrp.Name, err) - } - - return nil -} - func defaultDropReason(flow *flowpb.Flow) bool { return flow.GetDropReasonDesc() != flowpb.DropReason_DROP_REASON_UNKNOWN } @@ -288,6 +179,10 @@ func authRequiredDropReason(flow *flowpb.Flow) bool { return flow.GetDropReasonDesc() == flowpb.DropReason_AUTH_REQUIRED } +func unencryptedDropReason(flow *flowpb.Flow) bool { + return flow.GetDropReasonDesc() == flowpb.DropReason_UNENCRYPTED_TRAFFIC +} + type ExpectationsFunc func(a *Action) (egress, ingress Result) // WithExpectations sets the getExpectations test result function to use during tests @@ -335,35 +230,6 @@ func RegisterPolicy[T policy](current map[string]T, policies ...T) (map[string]T return current, nil } -// addCNPs adds one or more CiliumNetworkPolicy resources to the Test. -func (t *Test) addCNPs(cnps ...*ciliumv2.CiliumNetworkPolicy) (err error) { - t.cnps, err = RegisterPolicy(t.cnps, cnps...) 
- return err -} - -// addCNPs adds one or more CiliumClusterwideNetworkPolicy resources to the Test. -func (t *Test) addCCNPs(ccnps ...*ciliumv2.CiliumClusterwideNetworkPolicy) (err error) { - t.ccnps, err = RegisterPolicy(t.ccnps, ccnps...) - return err -} - -// addKNPs adds one or more K8S NetworkPolicy resources to the Test. -func (t *Test) addKNPs(policies ...*networkingv1.NetworkPolicy) (err error) { - t.knps, err = RegisterPolicy(t.knps, policies...) - return err -} - -// addCEGPs adds one or more CiliumEgressGatewayPolicy resources to the Test. -func (t *Test) addCEGPs(cegps ...*ciliumv2.CiliumEgressGatewayPolicy) (err error) { - t.cegps, err = RegisterPolicy(t.cegps, cegps...) - return err -} - -func (t *Test) addCLRPs(clrps ...*ciliumv2.CiliumLocalRedirectPolicy) (err error) { - t.clrps, err = RegisterPolicy(t.clrps, clrps...) - return err -} - func sumMap(m map[string]int) int { sum := 0 for _, v := range m { @@ -376,9 +242,18 @@ func sumMap(m map[string]int) int { // can apply or delete policies in case of connectivity test concurrency > 1 var policyApplyDeleteLock = lock.Mutex{} -// applyPolicies applies all the Test's registered network policies. -func (t *Test) applyPolicies(ctx context.Context) error { - if len(t.cnps) == 0 && len(t.ccnps) == 0 && len(t.knps) == 0 && len(t.cegps) == 0 && len(t.clrps) == 0 { +// isPolicy returns true if the object is a network policy, and thus +// should bump the policy revision. +func isPolicy(obj k8s.Object) bool { + gk := obj.GetObjectKind().GroupVersionKind().GroupKind() + return (gk == schema.GroupKind{Group: ciliumv2.CustomResourceDefinitionGroup, Kind: ciliumv2.CNPKindDefinition} || + gk == schema.GroupKind{Group: ciliumv2.CustomResourceDefinitionGroup, Kind: ciliumv2.CCNPKindDefinition} || + gk == schema.GroupKind{Group: networkingv1.GroupName, Kind: "NetworkPolicy"}) +} + +// applyResources applies all the Test's registered additional resources +func (t *Test) applyResources(ctx context.Context) error { + if len(t.resources) == 0 { return nil } @@ -397,73 +272,28 @@ func (t *Test) applyPolicies(ctx context.Context) error { // Incremented, by cluster, for every expected revision. revDeltas := map[string]int{} - // Apply all given CiliumNetworkPolicies. - for _, cnp := range t.cnps { - for _, client := range t.Context().clients.clients() { - t.Infof("📜 Applying CiliumNetworkPolicy '%s' to namespace '%s'..", cnp.Name, cnp.Namespace) - changed, err := createOrUpdateCNP(ctx, client, cnp) - if err != nil { - return fmt.Errorf("policy application failed: %w", err) - } - if changed { - revDeltas[client.ClusterName()]++ - } - } - } - // Apply all given CiliumClusterwideNetworkPolicy. - for _, ccnp := range t.ccnps { + // apply resources to all clusters + for _, obj := range t.resources { + kind := obj.GetObjectKind().GroupVersionKind().Kind for _, client := range t.Context().clients.clients() { - t.Infof("📜 Applying CiliumClusterwideNetworkPolicy '%s'..", ccnp.Name) - changed, err := createOrUpdateCCNP(ctx, client, ccnp) + t.Infof("📜 Applying %s '%s' to namespace '%s' on cluster %s..", kind, obj.GetName(), obj.GetNamespace(), client.ClusterName()) + changed, err := createOrUpdate(ctx, client, obj) if err != nil { - return fmt.Errorf("policy application failed: %w", err) - } - if changed { - revDeltas[client.ClusterName()]++ + return fmt.Errorf("failed to apply %s '%s' to namespace '%s' on cluster %s: %w", kind, obj.GetName(), obj.GetNamespace(), client.ClusterName(), err) } - } - } - // Apply all given Kubernetes Network Policies. 
- for _, knp := range t.knps { - for _, client := range t.Context().clients.clients() { - t.Infof("📜 Applying KubernetesNetworkPolicy '%s' to namespace '%s'..", knp.Name, knp.Namespace) - changed, err := createOrUpdateKNP(ctx, client, knp) - if err != nil { - return fmt.Errorf("policy application failed: %w", err) - } - if changed { + if changed && isPolicy(obj) { revDeltas[client.ClusterName()]++ } } } - // Apply all given Cilium Egress Gateway Policies. - for _, cegp := range t.cegps { - for _, client := range t.Context().clients.clients() { - t.Infof("📜 Applying CiliumEgressGatewayPolicy '%s' to namespace '%s'..", cegp.Name, cegp.Namespace) - if err := createOrUpdateCEGP(ctx, client, cegp); err != nil { - return fmt.Errorf("policy application failed: %w", err) - } - } - } - - // Apply all given Cilium Local Redirect Policies. - for _, clrp := range t.clrps { - for _, client := range t.Context().clients.clients() { - t.Infof("📜 Applying CiliumLocalRedirectPolicy '%s' to namespace '%s'..", clrp.Name, clrp.Namespace) - if err := createOrUpdateCLRP(ctx, client, clrp); err != nil { - return fmt.Errorf("policy application failed: %w", err) - } - } - } - // Register a finalizer with the Test immediately to enable cleanup. // If we return a cleanup closure from this function, cleanup cannot be // performed if the user cancels during the policy revision wait time. t.finalizers = append(t.finalizers, func(ctx context.Context) error { - if err := t.deletePolicies(ctx); err != nil { + if err := t.deleteResources(ctx); err != nil { t.CiliumLogs(ctx) return err } @@ -484,29 +314,17 @@ func (t *Test) applyPolicies(ctx context.Context) error { } } - if len(t.cnps) > 0 { - t.Debugf("📜 Successfully applied %d CiliumNetworkPolicies", len(t.cnps)) - } - if len(t.ccnps) > 0 { - t.Debugf("📜 Successfully applied %d CiliumClusterwideNetworkPolicies", len(t.ccnps)) - } - if len(t.knps) > 0 { - t.Debugf("📜 Successfully applied %d K8S NetworkPolicies", len(t.knps)) - } - if len(t.cegps) > 0 { - t.Debugf("📜 Successfully applied %d CiliumEgressGatewayPolicies", len(t.cegps)) - } - - if len(t.clrps) > 0 { - t.Debugf("📜 Successfully applied %d CiliumLocalRedirectPolicies", len(t.clrps)) + if len(t.resources) > 0 { + t.Debugf("📜 Successfully applied %d additional resources", len(t.resources)) } return nil } -// deletePolicies deletes a given set of network policies from the cluster. -func (t *Test) deletePolicies(ctx context.Context) error { - if len(t.cnps) == 0 && len(t.ccnps) == 0 && len(t.knps) == 0 && len(t.cegps) == 0 && len(t.clrps) == 0 { +// deleteResources deletes the previously-created set of resources that +// belong to this test. +func (t *Test) deleteResources(ctx context.Context) error { + if len(t.resources) == 0 { return nil } @@ -523,84 +341,30 @@ func (t *Test) deletePolicies(ctx context.Context) error { } revDeltas := map[string]int{} - // Delete all the Test's CNPs from all clients. - for _, cnp := range t.cnps { - t.Infof("📜 Deleting CiliumNetworkPolicy '%s' from namespace '%s'..", cnp.Name, cnp.Namespace) - for _, client := range t.Context().clients.clients() { - if err := deleteCNP(ctx, client, cnp); err != nil { - return fmt.Errorf("deleting CiliumNetworkPolicy: %w", err) - } - revDeltas[client.ClusterName()]++ - } - } - - // Delete all the Test's CCNPs from all clients. 
- for _, ccnp := range t.ccnps { - t.Infof("📜 Deleting CiliumClusterwideNetworkPolicy '%s'..", ccnp.Name) - for _, client := range t.Context().clients.clients() { - if err := deleteCCNP(ctx, client, ccnp); err != nil { - return fmt.Errorf("deleting CiliumClusterwideNetworkPolicy: %w", err) - } - revDeltas[client.ClusterName()]++ - } - } - - // Delete all the Test's KNPs from all clients. - for _, knp := range t.knps { - t.Infof("📜 Deleting K8S NetworkPolicy '%s' from namespace '%s'..", knp.Name, knp.Namespace) - for _, client := range t.Context().clients.clients() { - if err := deleteKNP(ctx, client, knp); err != nil { - return fmt.Errorf("deleting K8S NetworkPolicy: %w", err) - } - revDeltas[client.ClusterName()]++ - } - } - - // Delete all the Test's CEGPs from all clients. - for _, cegp := range t.cegps { - t.Infof("📜 Deleting CiliumEgressGatewayPolicy '%s' from namespace '%s'..", cegp.Name, cegp.Namespace) + for _, obj := range t.resources { + kind := obj.GetObjectKind().GroupVersionKind().Kind for _, client := range t.Context().clients.clients() { - if err := deleteCEGP(ctx, client, cegp); err != nil { - return fmt.Errorf("deleting CiliumEgressGatewayPolicy: %w", err) + t.Infof("📜 Deleting %s '%s' in namespace '%s' on cluster %s..", kind, obj.GetName(), obj.GetNamespace(), client.ClusterName()) + err := client.DeleteGeneric(ctx, obj) + if err != nil { + return fmt.Errorf("failed to delete %s '%s' in namespace '%s' on cluster %s: %w", kind, obj.GetName(), obj.GetNamespace(), client.ClusterName(), err) } - } - } - // Delete all the Test's CLRPs from all clients. - for _, clrp := range t.clrps { - t.Infof("📜 Deleting CiliumLocalRedirectPolicy '%s' from namespace '%s'..", clrp.Name, clrp.Namespace) - for _, client := range t.Context().clients.clients() { - if err := deleteCLRP(ctx, client, clrp); err != nil { - return fmt.Errorf("deleting CiliumLocalRedirectPolicy: %w", err) + if isPolicy(obj) { + revDeltas[client.ClusterName()]++ } } } - if len(t.cnps) != 0 || len(t.ccnps) != 0 || len(t.knps) != 0 || len(t.clrps) != 0 { + if len(revDeltas) > 0 { // Wait for policies to be deleted on all Cilium nodes. if err := t.waitCiliumPolicyRevisions(ctx, revs, revDeltas); err != nil { - return fmt.Errorf("timed out removing policies on Cilium agents: %w", err) + return fmt.Errorf("timed out waiting for policy updates to be processed on Cilium agents: %w", err) } } - if len(t.cnps) > 0 { - t.Debugf("📜 Successfully deleted %d CiliumNetworkPolicies", len(t.cnps)) - } - - if len(t.ccnps) > 0 { - t.Debugf("📜 Successfully deleted %d CiliumClusterwideNetworkPolicies", len(t.ccnps)) - } - - if len(t.knps) > 0 { - t.Debugf("📜 Successfully deleted %d K8S NetworkPolicy", len(t.knps)) - } - - if len(t.cegps) > 0 { - t.Debugf("📜 Successfully deleted %d CiliumEgressGatewayPolicies", len(t.cegps)) - } - - if len(t.clrps) > 0 { - t.Debugf("📜 Successfully deleted %d CiliumLocalRedirectPolicies", len(t.clrps)) + if len(t.resources) > 0 { + t.Debugf("📜 Successfully deleted %d resources", len(t.resources)) } return nil @@ -610,7 +374,7 @@ func (t *Test) deletePolicies(ctx context.Context) error { // filter is applied on each line of output. 
func (t *Test) CiliumLogs(ctx context.Context) { for _, pod := range t.Context().ciliumPods { - log, err := pod.K8sClient.CiliumLogs(ctx, pod.Pod.Namespace, pod.Pod.Name, t.startTime) + log, err := pod.K8sClient.CiliumLogs(ctx, pod.Pod.Namespace, pod.Pod.Name, t.startTime, false) if err != nil { t.Fatalf("Error reading Cilium logs: %s", err) } @@ -618,58 +382,155 @@ func (t *Test) CiliumLogs(ctx context.Context) { } } -// ParsePolicyYAML decodes a yaml file into a slice of policies. -func ParsePolicyYAML[T runtime.Object](input string, scheme *runtime.Scheme) (output []T, err error) { - if input == "" { - return nil, nil - } - - yamls := strings.Split(input, "\n---") +// tweakPolicy adjusts a test-dependent resource to insert the namespace +// in known objects. +func (t *Test) tweakPolicy(in *unstructured.Unstructured) *unstructured.Unstructured { + group := in.GroupVersionKind().Group + kind := in.GroupVersionKind().Kind - for _, yaml := range yamls { - if strings.TrimSpace(yaml) == "" { - continue + var tweaked runtime.Object + if group == ciliumv2.CustomResourceDefinitionGroup && kind == ciliumv2.CNPKindDefinition { + t.WithFeatureRequirements(features.RequireEnabled(features.CNP)) + cnp := ciliumv2.CiliumNetworkPolicy{} + if err := convertInto(in, &cnp); err != nil { + t.Fatalf("could not parse CiliumNetworkPolicy: %v", err) + return nil + } + if cnp.Namespace == "" { + cnp.Namespace = t.ctx.params.TestNamespace } + configureNamespaceInPolicySpec(cnp.Spec, t.ctx.params.TestNamespace) + tweaked = &cnp + } - obj, kind, err := serializer.NewCodecFactory(scheme, serializer.EnableStrict).UniversalDeserializer().Decode([]byte(yaml), nil, nil) - if err != nil { - return nil, fmt.Errorf("decoding yaml file: %s\nerror: %w", yaml, err) + if group == ciliumv2.CustomResourceDefinitionGroup && kind == ciliumv2.CCNPKindDefinition { + t.WithFeatureRequirements(features.RequireEnabled(features.CCNP)) + ccnp := ciliumv2.CiliumClusterwideNetworkPolicy{} + if err := convertInto(in, &ccnp); err != nil { + t.Fatalf("could not parse CiliumClusterwideNetworkPolicy: %v", err) + return nil } + configureNamespaceInPolicySpec(ccnp.Spec, t.ctx.params.TestNamespace) + tweaked = &ccnp + } - switch policy := obj.(type) { - case T: - output = append(output, policy) - default: - return nil, fmt.Errorf("unknown type '%s' in: %s", kind.Kind, yaml) + if group == networkingv1.GroupName && kind == "NetworkPolicy" { + t.WithFeatureRequirements(features.RequireEnabled(features.KNP)) + knp := networkingv1.NetworkPolicy{} + if err := convertInto(in, &knp); err != nil { + t.Fatalf("could not parse NetworkPolicy: %v", err) + return nil } + configureNamespaceInKNP(&knp, t.ctx.params.TestNamespace) + tweaked = &knp } - return output, nil -} + if tweaked == nil { + return in + } -// parseCiliumPolicyYAML decodes policy yaml into a slice of CiliumNetworkPolicies. -func parseCiliumPolicyYAML(policy string) (cnps []*ciliumv2.CiliumNetworkPolicy, err error) { - return ParsePolicyYAML[*ciliumv2.CiliumNetworkPolicy](policy, scheme.Scheme) + out := unstructured.Unstructured{} + if err := convertInto(tweaked, &out); err != nil { + t.Fatalf("could not convert tweaked object") // unreachable + return nil + } + return &out } -// parseCiliumClusterwidePolicyYAML decodes policy yaml into a slice of CiliumClusterwideNetworkPolicy. 
-func parseCiliumClusterwidePolicyYAML(policy string) (cnps []*ciliumv2.CiliumClusterwideNetworkPolicy, err error) { - return ParsePolicyYAML[*ciliumv2.CiliumClusterwideNetworkPolicy](policy, scheme.Scheme) +func configureNamespaceInPolicySpec(spec *api.Rule, namespace string) { + if spec == nil { + return + } + + for _, k := range []string{ + k8sConst.PodNamespaceLabel, + KubernetesSourcedLabelPrefix + k8sConst.PodNamespaceLabel, + AnySourceLabelPrefix + k8sConst.PodNamespaceLabel, + } { + for _, e := range spec.Egress { + for _, es := range e.ToEndpoints { + if n, ok := es.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { + es.MatchLabels[k] = namespace + } + } + } + for _, e := range spec.Ingress { + for _, es := range e.FromEndpoints { + if n, ok := es.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { + es.MatchLabels[k] = namespace + } + } + } + + for _, e := range spec.EgressDeny { + for _, es := range e.ToEndpoints { + if n, ok := es.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { + es.MatchLabels[k] = namespace + } + } + } + + for _, e := range spec.IngressDeny { + for _, es := range e.FromEndpoints { + if n, ok := es.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { + es.MatchLabels[k] = namespace + } + } + } + } } -// parseK8SPolicyYAML decodes policy yaml into a slice of K8S NetworkPolicies. -func parseK8SPolicyYAML(policy string) (policies []*networkingv1.NetworkPolicy, err error) { - return ParsePolicyYAML[*networkingv1.NetworkPolicy](policy, clientsetscheme.Scheme) +func configureNamespaceInKNP(pol *networkingv1.NetworkPolicy, namespace string) { + pol.Namespace = namespace + + if pol.Spec.Size() != 0 { + for _, k := range []string{ + k8sConst.PodNamespaceLabel, + KubernetesSourcedLabelPrefix + k8sConst.PodNamespaceLabel, + AnySourceLabelPrefix + k8sConst.PodNamespaceLabel, + } { + for _, e := range pol.Spec.Egress { + for _, es := range e.To { + if es.PodSelector != nil { + if n, ok := es.PodSelector.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { + es.PodSelector.MatchLabels[k] = namespace + } + } + if es.NamespaceSelector != nil { + if n, ok := es.NamespaceSelector.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { + es.NamespaceSelector.MatchLabels[k] = namespace + } + } + } + } + for _, e := range pol.Spec.Ingress { + for _, es := range e.From { + if es.PodSelector != nil { + if n, ok := es.PodSelector.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { + es.PodSelector.MatchLabels[k] = namespace + } + } + if es.NamespaceSelector != nil { + if n, ok := es.NamespaceSelector.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { + es.NamespaceSelector.MatchLabels[k] = namespace + } + } + } + } + } + } } -// parseCiliumEgressGatewayPolicyYAML decodes policy yaml into a slice of -// CiliumEgressGatewayPolicies. -func parseCiliumEgressGatewayPolicyYAML(policy string) (cegps []*ciliumv2.CiliumEgressGatewayPolicy, err error) { - return ParsePolicyYAML[*ciliumv2.CiliumEgressGatewayPolicy](policy, scheme.Scheme) +// convertInto converts an object using JSON +func convertInto(input, output runtime.Object) error { + b, err := json.Marshal(input) + if err != nil { + return err // unreachable + } + return parseInto(b, output) } -// parseCiliumLocalRedirectPolicyYAML decodes policy yaml into a slice of -// CiliumLocalRedirectPolicies. 
-func parseCiliumLocalRedirectPolicyYAML(policy string) (clrp []*ciliumv2.CiliumLocalRedirectPolicy, err error) { - return ParsePolicyYAML[*ciliumv2.CiliumLocalRedirectPolicy](policy, scheme.Scheme) +func parseInto(b []byte, output runtime.Object) error { + _, _, err := serializer.NewCodecFactory(scheme.Scheme, serializer.EnableStrict).UniversalDeserializer().Decode(b, nil, output) + return err } diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/result.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/result.go index d295c40d0d..de94e6b555 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/result.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/result.go @@ -181,6 +181,13 @@ var ( ExitCode: ExitAnyError, } + ResultEgressUnencryptedDrop = Result{ + Drop: true, + EgressDrop: true, + DropReasonFunc: unencryptedDropReason, + ExitCode: ExitCurlTimeout, + } + // ResultDropCurlTimeout expects a dropped flow and a failed command. ResultDropCurlTimeout = Result{ Drop: true, diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/test.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/test.go index a89c3af020..c6a5f54961 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/test.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/test.go @@ -7,6 +7,7 @@ import ( "bytes" "context" _ "embed" + "errors" "fmt" "io" "net" @@ -21,16 +22,17 @@ import ( "github.com/cloudflare/cfssl/signer" "github.com/cloudflare/cfssl/signer/local" corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/yaml" "github.com/cilium/cilium/cilium-cli/defaults" + "github.com/cilium/cilium/cilium-cli/k8s" "github.com/cilium/cilium/cilium-cli/sysdump" "github.com/cilium/cilium/cilium-cli/utils/features" k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io" ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" "github.com/cilium/cilium/pkg/lock" - "github.com/cilium/cilium/pkg/policy/api" "github.com/cilium/cilium/pkg/versioncheck" ) @@ -62,10 +64,7 @@ func NewTest(name string, verbose bool, debug bool) *Test { test := &Test{ name: name, scenarios: make(map[Scenario][]*Action), - cnps: make(map[string]*ciliumv2.CiliumNetworkPolicy), - ccnps: make(map[string]*ciliumv2.CiliumClusterwideNetworkPolicy), - knps: make(map[string]*networkingv1.NetworkPolicy), - cegps: make(map[string]*ciliumv2.CiliumEgressGatewayPolicy), + resources: []k8s.Object{}, clrps: make(map[string]*ciliumv2.CiliumLocalRedirectPolicy), logBuf: &bytes.Buffer{}, // maintain internal buffer by default conditionFn: nil, @@ -110,21 +109,14 @@ type Test struct { // Needs to be stored as a list, these are implemented in another package. scenariosSkipped []Scenario - // Policies active during this test. - cnps map[string]*ciliumv2.CiliumNetworkPolicy - - // Cilium Clusterwide Network Policies active during this test. - ccnps map[string]*ciliumv2.CiliumClusterwideNetworkPolicy - - // Kubernetes Network Policies active during this test. - knps map[string]*networkingv1.NetworkPolicy - - // Cilium Egress Gateway Policies active during this test. - cegps map[string]*ciliumv2.CiliumEgressGatewayPolicy - // Cilium Local Redirect Policies active during this test. 
clrps map[string]*ciliumv2.CiliumLocalRedirectPolicy + // k8s resources that should be created before the test run, and removed afterwards. + // If any of these correspond to a network policy, this will wait for the policy revision + // to be incremented. + resources []k8s.Object + // Secrets that have to be present during the test. secrets map[string]*corev1.Secret @@ -160,8 +152,8 @@ type Test struct { } func (t *Test) String() string { - return fmt.Sprintf("<Test %s, %d scenarios, %d CNPs, %d CCNPs, expectFunc %v>", - t.name, len(t.scenarios), len(t.cnps), len(t.ccnps), t.expectFunc) + return fmt.Sprintf("<Test %s, %d scenarios, %d resources, expectFunc %v>", + t.name, len(t.scenarios), len(t.resources), t.expectFunc) } // Name returns the name of the test. @@ -211,7 +203,7 @@ func (t *Test) setup(ctx context.Context) error { } // Apply CNPs & KNPs to the cluster. - if err := t.applyPolicies(ctx); err != nil { + if err := t.applyResources(ctx); err != nil { t.CiliumLogs(ctx) return fmt.Errorf("applying network policies: %w", err) } @@ -393,49 +385,6 @@ func (t *Test) Run(ctx context.Context, index int) error { return nil } -func configureNamespaceInPolicySpec(spec *api.Rule, namespace string) { - if spec == nil { - return - } - - for _, k := range []string{ - k8sConst.PodNamespaceLabel, - KubernetesSourcedLabelPrefix + k8sConst.PodNamespaceLabel, - AnySourceLabelPrefix + k8sConst.PodNamespaceLabel, - } { - for _, e := range spec.Egress { - for _, es := range e.ToEndpoints { - if n, ok := es.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { - es.MatchLabels[k] = namespace - } - } - } - for _, e := range spec.Ingress { - for _, es := range e.FromEndpoints { - if n, ok := es.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { - es.MatchLabels[k] = namespace - } - } - } - - for _, e := range spec.EgressDeny { - for _, es := range e.ToEndpoints { - if n, ok := es.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { - es.MatchLabels[k] = namespace - } - } - } - - for _, e := range spec.IngressDeny { - for _, es := range e.FromEndpoints { - if n, ok := es.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { - es.MatchLabels[k] = namespace - } - } - } - } -} - // WithCondition takes a function containing condition check logic that // returns true if the test needs to be run, and false otherwise. If // WithCondition gets called multiple times, all the conditions need to be @@ -445,29 +394,41 @@ func (t *Test) WithCondition(fn func() bool) *Test { return t } +// WithResources registers the list of one or more YAML-defined +// Kubernetes resources (e.g. NetworkPolicy, etc.) +// +// # For certain well-known types, known references to the namespace are mutated +// +// If the resource has a namespace of "cilium-test", that is mutated +// to the (serialized) namespace of the individual scenario. +func (t *Test) WithResources(spec string) *Test { + buf := bytes.Buffer{} + buf.WriteString(spec) + decoder := yaml.NewYAMLOrJSONDecoder(&buf, 4096) + + for { + u := unstructured.Unstructured{} + if err := decoder.Decode(&u); err != nil { + if errors.Is(err, io.EOF) { + break + } + t.Fatalf("Parsing resource YAML: %s", err) + } + + if u.GetNamespace() == defaults.ConnectivityCheckNamespace { + u.SetNamespace(t.ctx.params.TestNamespace) + } + t.resources = append(t.resources, t.tweakPolicy(&u)) + } + return t +} + // WithCiliumPolicy takes a string containing a YAML policy document and adds // the polic(y)(ies) to the scope of the Test, to be applied when the test // starts running.
When calling this method, note that the CNP enabled feature // // requirement is applied directly here. func (t *Test) WithCiliumPolicy(policy string) *Test { - pl, err := parseCiliumPolicyYAML(policy) - if err != nil { - t.Fatalf("Parsing policy YAML: %s", err) - } - - // Change the default test namespace as required. - for i := range pl { - pl[i].Namespace = t.ctx.params.TestNamespace - configureNamespaceInPolicySpec(pl[i].Spec, t.ctx.params.TestNamespace) - } - - if err := t.addCNPs(pl...); err != nil { - t.Fatalf("Adding CNPs to policy context: %s", err) - } - - t.WithFeatureRequirements(features.RequireEnabled(features.CNP)) - - return t + return t.WithResources(policy) } // WithCiliumClusterwidePolicy takes a string containing a YAML policy document @@ -475,23 +436,7 @@ func (t *Test) WithCiliumPolicy(policy string) *Test { // when the test starts running. When calling this method, note that the CCNP // enabled feature requirement is applied directly here. func (t *Test) WithCiliumClusterwidePolicy(policy string) *Test { - pl, err := parseCiliumClusterwidePolicyYAML(policy) - if err != nil { - t.Fatalf("Parsing policy YAML: %s", err) - } - - // Change the default test namespace as required. - for i := range pl { - configureNamespaceInPolicySpec(pl[i].Spec, t.ctx.params.TestNamespace) - } - - if err := t.addCCNPs(pl...); err != nil { - t.Fatalf("Adding CCNPs to policy context: %s", err) - } - - t.WithFeatureRequirements(features.RequireEnabled(features.CCNP)) - - return t + return t.WithResources(policy) } // WithK8SPolicy takes a string containing a YAML policy document and adds @@ -499,61 +444,7 @@ func (t *Test) WithCiliumClusterwidePolicy(policy string) *Test { // starts running. When calling this method, note that the KNP enabled feature // requirement is applied directly here. func (t *Test) WithK8SPolicy(policy string) *Test { - pl, err := parseK8SPolicyYAML(policy) - if err != nil { - t.Fatalf("Parsing K8S policy YAML: %s", err) - } - - // Change the default test namespace as required. - for i := range pl { - pl[i].Namespace = t.ctx.params.TestNamespace - - if pl[i].Spec.Size() != 0 { - for _, k := range []string{ - k8sConst.PodNamespaceLabel, - KubernetesSourcedLabelPrefix + k8sConst.PodNamespaceLabel, - AnySourceLabelPrefix + k8sConst.PodNamespaceLabel, - } { - for _, e := range pl[i].Spec.Egress { - for _, es := range e.To { - if es.PodSelector != nil { - if n, ok := es.PodSelector.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { - es.PodSelector.MatchLabels[k] = t.ctx.params.TestNamespace - } - } - if es.NamespaceSelector != nil { - if n, ok := es.NamespaceSelector.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { - es.NamespaceSelector.MatchLabels[k] = t.ctx.params.TestNamespace - } - } - } - } - for _, e := range pl[i].Spec.Ingress { - for _, es := range e.From { - if es.PodSelector != nil { - if n, ok := es.PodSelector.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { - es.PodSelector.MatchLabels[k] = t.ctx.params.TestNamespace - } - } - if es.NamespaceSelector != nil { - if n, ok := es.NamespaceSelector.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { - es.NamespaceSelector.MatchLabels[k] = t.ctx.params.TestNamespace - } - } - } - } - } - } - } - - if err := t.addKNPs(pl...); err != nil { - t.Fatalf("Adding K8S Network Policies to policy context: %s", err) - } - - // It is implicit that KNP should be enabled. 
- t.WithFeatureRequirements(features.RequireEnabled(features.KNP)) - - return t + return t.WithResources(policy) } // CiliumLocalRedirectPolicyParams is used to configure a CiliumLocalRedirectPolicy template. @@ -572,21 +463,18 @@ type CiliumLocalRedirectPolicyParams struct { } func (t *Test) WithCiliumLocalRedirectPolicy(params CiliumLocalRedirectPolicyParams) *Test { - pl, err := parseCiliumLocalRedirectPolicyYAML(params.Policy) - if err != nil { + pl := ciliumv2.CiliumLocalRedirectPolicy{} + if err := parseInto([]byte(params.Policy), &pl); err != nil { t.Fatalf("Parsing local redirect policy YAML: %s", err) } - for i := range pl { - pl[i].Namespace = t.ctx.params.TestNamespace - pl[i].Name = params.Name - pl[i].Spec.RedirectFrontend.AddressMatcher.IP = params.FrontendIP - pl[i].Spec.SkipRedirectFromBackend = params.SkipRedirectFromBackend - } + pl.Namespace = t.ctx.params.TestNamespace + pl.Name = params.Name + pl.Spec.RedirectFrontend.AddressMatcher.IP = params.FrontendIP + pl.Spec.SkipRedirectFromBackend = params.SkipRedirectFromBackend - if err := t.addCLRPs(pl...); err != nil { - t.Fatalf("Adding CLRPs to cilium local redirect policy context: %s", err) - } + t.resources = append(t.resources, &pl) + t.clrps[params.Name] = &pl t.WithFeatureRequirements(features.RequireEnabled(features.LocalRedirectPolicy)) @@ -622,59 +510,55 @@ type CiliumEgressGatewayPolicyParams struct { // note that the egress gateway enabled feature requirement is applied directly // here. func (t *Test) WithCiliumEgressGatewayPolicy(params CiliumEgressGatewayPolicyParams) *Test { - pl, err := parseCiliumEgressGatewayPolicyYAML(egressGatewayPolicyYAML) - if err != nil { - t.Fatalf("Parsing policy YAML: %s", err) - } - - for i := range pl { - // Change the default test namespace as required. - for _, k := range []string{ - k8sConst.PodNamespaceLabel, - KubernetesSourcedLabelPrefix + k8sConst.PodNamespaceLabel, - AnySourceLabelPrefix + k8sConst.PodNamespaceLabel, - } { - for _, e := range pl[i].Spec.Selectors { - ps := e.PodSelector - if n, ok := ps.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { - ps.MatchLabels[k] = t.ctx.params.TestNamespace - } + pl := ciliumv2.CiliumEgressGatewayPolicy{} + if err := parseInto([]byte(egressGatewayPolicyYAML), &pl); err != nil { + t.Fatalf("Parsing EgressGatewayPolicy: %s", err) + } + + // Change the default test namespace as required. 
+ for _, k := range []string{ + k8sConst.PodNamespaceLabel, + KubernetesSourcedLabelPrefix + k8sConst.PodNamespaceLabel, + AnySourceLabelPrefix + k8sConst.PodNamespaceLabel, + } { + for _, e := range pl.Spec.Selectors { + ps := e.PodSelector + if n, ok := ps.MatchLabels[k]; ok && n == defaults.ConnectivityCheckNamespace { + ps.MatchLabels[k] = t.ctx.params.TestNamespace } } + } - // Set the policy name - pl[i].Name = params.Name - - // Set the pod selector - pl[i].Spec.Selectors[0].PodSelector.MatchLabels["kind"] = params.PodSelectorKind + // Set the policy name + pl.Name = params.Name - // Set the egress gateway node - egressGatewayNode := t.EgressGatewayNode() - if egressGatewayNode == "" { - t.Fatalf("Cannot find egress gateway node") - } + // Set the pod selector + pl.Spec.Selectors[0].PodSelector.MatchLabels["kind"] = params.PodSelectorKind - pl[i].Spec.EgressGateway.NodeSelector.MatchLabels["kubernetes.io/hostname"] = egressGatewayNode + // Set the egress gateway node + egressGatewayNode := t.EgressGatewayNode() + if egressGatewayNode == "" { + t.Fatalf("Cannot find egress gateway node") + } - // Set the excluded CIDRs - pl[i].Spec.ExcludedCIDRs = []ciliumv2.IPv4CIDR{} + pl.Spec.EgressGateway.NodeSelector.MatchLabels["kubernetes.io/hostname"] = egressGatewayNode - switch params.ExcludedCIDRsConf { - case ExternalNodeExcludedCIDRs: - for _, nodeWithoutCiliumIP := range t.Context().params.NodesWithoutCiliumIPs { - if parsedIP := net.ParseIP(nodeWithoutCiliumIP.IP); parsedIP.To4() == nil { - continue - } + // Set the excluded CIDRs + pl.Spec.ExcludedCIDRs = []ciliumv2.IPv4CIDR{} - cidr := ciliumv2.IPv4CIDR(fmt.Sprintf("%s/32", nodeWithoutCiliumIP.IP)) - pl[i].Spec.ExcludedCIDRs = append(pl[i].Spec.ExcludedCIDRs, cidr) + switch params.ExcludedCIDRsConf { + case ExternalNodeExcludedCIDRs: + for _, nodeWithoutCiliumIP := range t.Context().params.NodesWithoutCiliumIPs { + if parsedIP := net.ParseIP(nodeWithoutCiliumIP.IP); parsedIP.To4() == nil { + continue } + + cidr := ciliumv2.IPv4CIDR(fmt.Sprintf("%s/32", nodeWithoutCiliumIP.IP)) + pl.Spec.ExcludedCIDRs = append(pl.Spec.ExcludedCIDRs, cidr) } } - if err := t.addCEGPs(pl...); err != nil { - t.Fatalf("Adding CEGPs to cilium egress gateway policy context: %s", err) - } + t.resources = append(t.resources, &pl) t.WithFeatureRequirements(features.RequireEnabled(features.EgressGateway)) @@ -949,16 +833,6 @@ func (t *Test) collectSysdump() { func (t *Test) ForEachIPFamily(do func(features.IPFamily)) { ipFams := []features.IPFamily{features.IPFamilyV4, features.IPFamilyV6} - // The per-endpoint routes feature is broken with IPv6 on < v1.14 when there - // are any netpols installed (https://github.com/cilium/cilium/issues/23852 - // and https://github.com/cilium/cilium/issues/23910). 
- if f, ok := t.Context().Feature(features.EndpointRoutes); ok && - f.Enabled && (len(t.cnps) > 0 || len(t.knps) > 0) && - versioncheck.MustCompile("<1.14.0")(t.Context().CiliumVersion) { - - ipFams = []features.IPFamily{features.IPFamilyV4} - } - for _, ipFam := range ipFams { switch ipFam { case features.IPFamilyV4: @@ -979,18 +853,6 @@ func (t *Test) CertificateCAs() map[string][]byte { return t.certificateCAs } -func (t *Test) CiliumNetworkPolicies() map[string]*ciliumv2.CiliumNetworkPolicy { - return t.cnps -} - -func (t *Test) CiliumClusterwideNetworkPolicies() map[string]*ciliumv2.CiliumClusterwideNetworkPolicy { - return t.ccnps -} - -func (t *Test) KubernetesNetworkPolicies() map[string]*networkingv1.NetworkPolicy { - return t.knps -} - func (t *Test) CiliumLocalRedirectPolicies() map[string]*ciliumv2.CiliumLocalRedirectPolicy { return t.clrps } diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/wait.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/wait.go index 74c4a8f628..e0c80839cd 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/wait.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/check/wait.go @@ -23,7 +23,6 @@ import ( "github.com/cilium/cilium/cilium-cli/k8s" "github.com/cilium/cilium/cilium-cli/utils/features" "github.com/cilium/cilium/cilium-cli/utils/wait" - "github.com/cilium/cilium/pkg/inctimer" ) const ( @@ -48,7 +47,7 @@ func WaitForDeployment(ctx context.Context, log Logger, client *k8s.Client, name log.Debugf("[%s] Deployment %s/%s is not yet ready: %s", client.ClusterName(), namespace, name, err) select { - case <-inctimer.After(PollInterval): + case <-time.After(PollInterval): case <-ctx.Done(): return fmt.Errorf("timeout reached waiting for deployment %s/%s to become ready (last error: %w)", namespace, name, err) @@ -71,7 +70,7 @@ func WaitForDaemonSet(ctx context.Context, log Logger, client *k8s.Client, names log.Debugf("[%s] DaemonSet %s/%s is not yet ready: %s", client.ClusterName(), namespace, name, err) select { - case <-inctimer.After(PollInterval): + case <-time.After(PollInterval): case <-ctx.Done(): return fmt.Errorf("timeout reached waiting for DaemonSet %s/%s to become ready (last error: %w)", namespace, name, err) @@ -103,7 +102,7 @@ func WaitForPodDNS(ctx context.Context, log Logger, src, dst Pod) error { src.K8sClient.ClusterName(), target, src.Name(), dst.Name(), err, stdout.String()) select { - case <-inctimer.After(PollInterval): + case <-time.After(PollInterval): case <-ctx.Done(): return fmt.Errorf("timeout reached waiting for lookup for %s from pod %s to server on pod %s to succeed (last error: %w)", target, src.Name(), dst.Name(), err, @@ -131,7 +130,7 @@ func WaitForCoreDNS(ctx context.Context, log Logger, client Pod) error { client.K8sClient.ClusterName(), target, client.Name(), err, stdout.String()) select { - case <-inctimer.After(PollInterval): + case <-time.After(PollInterval): case <-ctx.Done(): return fmt.Errorf("timeout reached waiting for lookup for %s from pod %s to succeed (last error: %w)", target, client.Name(), err) @@ -154,7 +153,7 @@ func WaitForServiceRetrieval(ctx context.Context, log Logger, client *k8s.Client log.Debugf("[%s] Failed to retrieve Service %s/%s: %s", client.ClusterName(), namespace, name, err) select { - case <-inctimer.After(PollInterval): + case <-time.After(PollInterval): case <-ctx.Done(): return Service{}, fmt.Errorf("timeout reached waiting for service %s/%s to be retrieved (last error: %w)", namespace, name, err) @@ 
-166,7 +165,7 @@ func WaitForServiceRetrieval(ctx context.Context, log Logger, client *k8s.Client func WaitForService(ctx context.Context, log Logger, client Pod, service Service) error { log.Logf("⌛ [%s] Waiting for Service %s to become ready...", client.K8sClient.ClusterName(), service.Name()) - ctx, cancel := context.WithTimeout(ctx, ShortTimeout) + ctx, cancel := context.WithTimeout(ctx, 2*ShortTimeout) defer cancel() if service.Service.Spec.ClusterIP == corev1.ClusterIPNone { @@ -197,7 +196,7 @@ func WaitForService(ctx context.Context, log Logger, client Pod, service Service client.K8sClient.ClusterName(), service.Name(), err, stdout.String()) select { - case <-inctimer.After(PollInterval): + case <-time.After(PollInterval): case <-ctx.Done(): return fmt.Errorf("timeout reached waiting for service %s (last error: %w)", service.Name(), err) } @@ -228,7 +227,7 @@ func WaitForServiceEndpoints(ctx context.Context, log Logger, agent Pod, service agent.K8sClient.ClusterName(), service.Name(), agent.Name(), err) select { - case <-inctimer.After(PollInterval): + case <-time.After(PollInterval): case <-ctx.Done(): return fmt.Errorf("timeout reached waiting for service %s to appear in Cilium pod %s (last error: %w)", service.Name(), agent.Name(), err) @@ -308,7 +307,7 @@ func WaitForNodePorts(ctx context.Context, log Logger, client Pod, nodeIP string client.K8sClient.ClusterName(), nodeIP, nodePort, service.Name(), err, stdout.String()) select { - case <-inctimer.After(PollInterval): + case <-time.After(PollInterval): case <-ctx.Done(): return fmt.Errorf("timeout reached waiting for NodePort %s:%d (%s) (last error: %w)", nodeIP, nodePort, service.Name(), err) @@ -336,7 +335,7 @@ func WaitForIPCache(ctx context.Context, log Logger, agent Pod, pods []Pod) erro log.Debugf("[%s] Error checking pod IPs in IPCache: %s", agent.K8sClient.ClusterName(), err) select { - case <-inctimer.After(PollInterval): + case <-time.After(PollInterval): case <-ctx.Done(): return fmt.Errorf("timeout reached waiting for pod IPs to be in IPCache of Cilium pod %s (last error: %w)", agent.Name(), err) diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/sniff/sniffer.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/sniff/sniffer.go index bcba0dc1f3..f035d74a02 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/sniff/sniffer.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/sniff/sniffer.go @@ -13,7 +13,6 @@ import ( "github.com/cilium/cilium/cilium-cli/connectivity/check" "github.com/cilium/cilium/cilium-cli/utils/lock" - "github.com/cilium/cilium/pkg/inctimer" ) // Mode configures the Sniffer validation mode. 
@@ -99,7 +98,7 @@ func Sniff(ctx context.Context, name string, target *check.Pod, } return nil, fmt.Errorf("Failed to execute tcpdump: %w", err) - case <-inctimer.After(100 * time.Millisecond): + case <-time.After(100 * time.Millisecond): line, err := sniffer.stdout.ReadString('\n') if err != nil && !errors.Is(err, io.EOF) { return nil, fmt.Errorf("Failed to read kubectl exec's stdout: %w", err) diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/encryption.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/encryption.go index 819f2e0e40..f42a4c0fba 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/encryption.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/encryption.go @@ -6,12 +6,18 @@ package tests import ( "context" "fmt" + "maps" + "slices" "strings" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + "github.com/cilium/cilium/cilium-cli/connectivity/check" "github.com/cilium/cilium/cilium-cli/connectivity/sniff" "github.com/cilium/cilium/cilium-cli/utils/features" "github.com/cilium/cilium/pkg/defaults" + ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" "github.com/cilium/cilium/pkg/versioncheck" ) @@ -197,6 +203,26 @@ func isWgEncap(t *check.Test) bool { return true } +// checkIPSecPodToPod checks whether in case of IPSec being enabled and used +// during the podToPodEncryption test it should be executed or skipped due to: +// +// 1. missing backporting of the commit in previous versions; +// 2. usage of IPv6, for which the test is flaky (see https://github.com/cilium/cilium/issues/35485). +// +// Once the above reasons are fixed, this function can be removed. +func checkIPSecPodToPod(t *check.Test, ipFam features.IPFamily) error { + if e, ok := t.Context().Feature(features.EncryptionPod); !(ok && e.Enabled && e.Mode == "ipsec") { + return nil + } + if !versioncheck.MustCompile(">=1.17.0")(t.Context().CiliumVersion) { + return fmt.Errorf("enabling test for IPSec requires backporting") + } + if ipFam == features.IPFamilyV6 { + return fmt.Errorf("inactive IPv6 test with IPSec, see https://github.com/cilium/cilium/issues/35485") + } + return nil +} + // PodToPodEncryption is a test case which checks the following: // - There is a connectivity between pods on different nodes when any // encryption mode is on (either WireGuard or IPsec). 
@@ -246,6 +272,10 @@ func (s *podToPodEncryption) Run(ctx context.Context, t *check.Test) { } t.ForEachIPFamily(func(ipFam features.IPFamily) { + if err := checkIPSecPodToPod(t, ipFam); err != nil { + t.Debugf("Skipping test: %v", err) + return + } testNoTrafficLeak(ctx, t, s, client, &server, &clientHost, &serverHost, requestHTTP, ipFam, assertNoLeaks, true, wgEncap) }) } @@ -300,6 +330,65 @@ func testNoTrafficLeak(ctx context.Context, t *check.Test, s check.Scenario, } } +func nodeToNodeEncTestPods(nodes map[check.NodeIdentity]*ciliumv2.CiliumNode, excludeSelector labels.Selector, clients, servers []check.Pod) (client, server *check.Pod) { + nodeKey := func(pod *check.Pod) check.NodeIdentity { + if pod != nil { + return check.NodeIdentity{Cluster: pod.K8sClient.ClusterName(), Name: pod.NodeName()} + } + return check.NodeIdentity{} + } + + acceptableNodes := func(pods []check.Pod) sets.Set[check.NodeIdentity] { + keys := sets.New[check.NodeIdentity]() + for _, pod := range pods { + node := nodes[nodeKey(&pod)] + if node == nil { + continue + } + + if excludeSelector.Matches(labels.Set(node.Labels)) { + continue + } + + keys.Insert(nodeKey(&pod)) + } + return keys + } + + getRandomPod := func(pods []check.Pod, nodes sets.Set[check.NodeIdentity]) *check.Pod { + for _, pod := range pods { + if nodes.Has(nodeKey(&pod)) { + return &pod + } + } + + return nil + } + + clientNodes := acceptableNodes(clients) + serverNodes := acceptableNodes(servers) + + // Prefer selecting a client (server) running on a node which does not + // host a server (client) as well, to maximize the possibilities of finding + // a valid combination. + clientNodesOnly := clientNodes.Difference(serverNodes) + serverNodesOnly := serverNodes.Difference(clientNodes) + + client = getRandomPod(clients, clientNodesOnly) + if client == nil { + client = getRandomPod(clients, clientNodes) + } + + server = getRandomPod(servers, serverNodesOnly) + if server == nil { + // Make sure to not pick a server hosted on the same node of the client. + serverNodes.Delete(nodeKey(client)) + server = getRandomPod(servers, serverNodes) + } + + return client, server +} + func NodeToNodeEncryption(reqs ...features.Requirement) check.Scenario { return &nodeToNodeEncryption{reqs} } @@ -311,17 +400,28 @@ func (s *nodeToNodeEncryption) Name() string { } func (s *nodeToNodeEncryption) Run(ctx context.Context, t *check.Test) { - client := t.Context().RandomClientPod() - - var server check.Pod - for _, pod := range t.Context().EchoPods() { - // Make sure that the server pod is on another node than client - if pod.Pod.Status.HostIP != client.Pod.Status.HostIP { - server = pod - break + ct := t.Context() + encryptNode, _ := ct.Feature(features.EncryptionNode) + + // Node to node encryption can be disabled on specific nodes (e.g., + // control plane ones) to prevent e.g., losing connectivity to the + // Kubernetes API Server. Let's take that into account when selecting + // the target pods/nodes. 
+ excludeNodes := labels.Nothing() + if encryptNode.Enabled { + var err error + if excludeNodes, err = labels.Parse(encryptNode.Mode); err != nil { + t.Fatalf("unable to parse label selector %s: %s", encryptNode.Mode, err) } } + client, server := nodeToNodeEncTestPods(ct.CiliumNodes(), excludeNodes, + slices.Collect(maps.Values(ct.ClientPods())), + slices.Collect(maps.Values(ct.EchoPods()))) + if client == nil || server == nil { + t.Fatal("Could not find matching pods: is node to node encryption disabled on all nodes hosting test pods?") + } + // clientHost is a pod running on the same node as the client pod, just in // the host netns. clientHost := t.Context().HostNetNSPodsByNode()[client.Pod.Spec.NodeName] @@ -362,6 +462,6 @@ func (s *nodeToNodeEncryption) Run(ctx context.Context, t *check.Test) { if onlyPodToPodWGWithTunnel { hostToPodAssertNoLeaks = true } - testNoTrafficLeak(ctx, t, s, &clientHost, &server, &clientHost, &serverHost, requestHTTP, ipFam, hostToPodAssertNoLeaks, false, wgEncap) + testNoTrafficLeak(ctx, t, s, &clientHost, server, &clientHost, &serverHost, requestHTTP, ipFam, hostToPodAssertNoLeaks, false, wgEncap) }) } diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/health.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/health.go index 3de33302e6..058ed288d4 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/health.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/health.go @@ -17,7 +17,6 @@ import ( "github.com/cilium/cilium/cilium-cli/connectivity/check" "github.com/cilium/cilium/cilium-cli/defaults" - "github.com/cilium/cilium/pkg/inctimer" ) func CiliumHealth() check.Scenario { @@ -44,8 +43,6 @@ func runHealthProbe(ctx context.Context, t *check.Test, pod *check.Pod) { // Probe health status until it passes checks or timeout is reached. for { - retryTimer := inctimer.After(time.Second) - if _, err := pod.K8sClient.GetPod(ctx, pod.Pod.Namespace, pod.Pod.Name, metav1.GetOptions{}); k8serrors.IsNotFound(err) { t.Failf("cilium-health validation failed. 
Cilium Agent Pod %s/%s no longer exists", pod.Pod.Namespace, pod.Pod.Name) return @@ -66,7 +63,7 @@ func runHealthProbe(ctx context.Context, t *check.Test, pod *check.Pod) { case <-done: t.Context().Fatalf("cilium-health probe on '%s' failed: %s", pod.Name(), err) return - case <-retryTimer: + case <-time.After(time.Second): } } } diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/multicast.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/multicast.go new file mode 100644 index 0000000000..76a377f2c0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/multicast.go @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package tests + +import ( + "context" + "errors" + "fmt" + "net/netip" + "strings" + "sync" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/cilium/ebpf" + + v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/node/addressing" + + "github.com/cilium/cilium/cilium-cli/connectivity/check" + "github.com/cilium/cilium/cilium-cli/defaults" + "github.com/cilium/cilium/cilium-cli/utils/features" +) + +const ( + testMulticastGroupIP = "239.255.9.9" + testSocatPort = 6666 +) + +// Having data to restore group and subscriber status after testing +var NodeWithoutGroup []string +var NotSubscribePodAddress map[string][]v2.NodeAddress +var ipToPodMap lock.Map[v2.NodeAddress, string] +var NodeWithoutGroupMu lock.RWMutex +var NotSubscribePodAddressMu lock.RWMutex + +type socatMulticast struct { +} + +func SocatMulticast() check.Scenario { + return &socatMulticast{} +} + +func (s *socatMulticast) Name() string { + return "multicast" +} + +func (s *socatMulticast) Run(ctx context.Context, t *check.Test) { + ct := t.Context() + defer func() { + s.cleanup(ctx, t) + }() + NotSubscribePodAddress = make(map[string][]v2.NodeAddress) + + // Add all cilium nodes to the multicast group + if err := s.addAllNodes(ctx, t); err != nil { + t.Fatalf("Fatal error occurred while adding all cilium nodes to multicast group: %v", err) + } + + bgCtx, cancelBg := context.WithCancel(ctx) + defer cancelBg() + + var wg sync.WaitGroup + + // Sender: Start repeated socat multicast client in the background) + for _, clientPod := range ct.SocatClientPods() { + wg.Add(1) + go func(pod check.Pod) { + defer wg.Done() + cmd := ct.SocatClientCommand(testSocatPort, testMulticastGroupIP) + doneCh := make(chan struct{}) + go func() { + _, stdErr, err := pod.K8sClient.ExecInPodWithStderr(bgCtx, pod.Pod.Namespace, pod.Pod.Name, pod.Pod.Labels["name"], cmd) + if err != nil && !strings.Contains(err.Error(), "context canceled") { + errMsg := fmt.Sprintf("Error: %v, Stderr: %s", err, stdErr.String()) + t.Logf("Error in background task for pod %s: %v", pod.Name(), errMsg) + } + close(doneCh) + }() + select { + case <-doneCh: + // Task finished normally + case <-bgCtx.Done(): + // Context was cancelled, handle cleanup + cancelCmd := ct.KillMulticastTestSender() + _, _, err := pod.K8sClient.ExecInPodWithStderr(ctx, pod.Pod.Namespace, pod.Pod.Name, pod.Pod.Labels["name"], cancelCmd) + if err != nil { + t.Logf("Error cancelling command for pod %s: %v", pod.Name(), err) + } + } + }(clientPod) + } + + // Receiver: Execute socat multicast server and check if multicast packets are coming in. 
+ for _, socatServerPod := range ct.SocatServerPods() { + t.NewAction(s, "socat multicast", &socatServerPod, nil, features.IPFamilyV4).Run(func(a *check.Action) { + cmd := ct.SocatServer1secCommand(socatServerPod, testSocatPort, testMulticastGroupIP) + // The exit code of socat server command with timeout is 0 if a packet is received, + // and 124 if no packet is received. + a.ExecInPod(ctx, cmd) + }) + } + + cancelBg() + wg.Wait() +} + +// Restore the state of the multicast group and subscriber after the test +func (s *socatMulticast) cleanup(ctx context.Context, t *check.Test) { + ct := t.Context() + client := ct.K8sClient() + ciliumNodesList, err := client.ListCiliumNodes(ctx) + if err != nil { + t.Fatalf("Fatal error occurred while listing cilium nodes: %v", err) + } + ciliumNodes := ciliumNodesList.Items + for _, ciliumNode := range ciliumNodes { + if s.isNodeWithoutGroup(ciliumNode.Name) { + if err := s.delGroup(ctx, t, ciliumNode.Name); err != nil { + t.Fatalf("Fatal error occurred while deleting multicast group: %v", err) + } + } else { + for _, podAddress := range NotSubscribePodAddress[ciliumNode.Name] { + if s.isNotSubscribePodAddress(ciliumNode.Name, podAddress) { + if err := s.delSubscriber(ctx, t, ciliumNode.Name, podAddress.IP); err != nil { + t.Fatalf("Fatal error occurred while deleting subscriber: %v", err) + } + } + } + } + } +} + +func (s *socatMulticast) addNodeWithoutGroup(nodeName string) { + NodeWithoutGroupMu.Lock() + defer NodeWithoutGroupMu.Unlock() + NodeWithoutGroup = append(NodeWithoutGroup, nodeName) +} + +func (s *socatMulticast) isNodeWithoutGroup(nodeName string) bool { + NodeWithoutGroupMu.RLock() + defer NodeWithoutGroupMu.RUnlock() + for _, node := range NodeWithoutGroup { + if node == nodeName { + return true + } + } + return false +} + +func (s *socatMulticast) addNotSubscribePodAddress(nodeName string, podAddress v2.NodeAddress) { + NotSubscribePodAddressMu.Lock() + defer NotSubscribePodAddressMu.Unlock() + NotSubscribePodAddress[nodeName] = append(NotSubscribePodAddress[nodeName], podAddress) +} + +func (s *socatMulticast) isNotSubscribePodAddress(nodeName string, podAddress v2.NodeAddress) bool { + NotSubscribePodAddressMu.RLock() + defer NotSubscribePodAddressMu.RUnlock() + for _, address := range NotSubscribePodAddress[nodeName] { + if address.IP == podAddress.IP { + return true + } + } + return false +} + +func (s *socatMulticast) getCiliumNode(ctx context.Context, t *check.Test, nodeName string) (v2.CiliumNode, error) { + ct := t.Context() + client := ct.K8sClient() + ciliumNodes, err := client.ListCiliumNodes(ctx) + if err != nil { + return v2.CiliumNode{}, err + } + var ciliumNode v2.CiliumNode + for _, node := range ciliumNodes.Items { + if node.Name == nodeName { + ciliumNode = node + } + } + return ciliumNode, nil +} + +func (s *socatMulticast) getCiliumInternalIP(ctx context.Context, t *check.Test, nodeName string) (v2.NodeAddress, error) { + ciliumNode, err := s.getCiliumNode(ctx, t, nodeName) + if err != nil { + return v2.NodeAddress{}, fmt.Errorf("unable to get cilium node: %w", err) + } + addrs := ciliumNode.Spec.Addresses + var ciliumInternalIP v2.NodeAddress + for _, addr := range addrs { + if addr.AddrType() == addressing.NodeCiliumInternalIP { + ip, err := netip.ParseAddr(addr.IP) + if err != nil { + continue + } + if ip.Is4() { + ciliumInternalIP = addr + } + } + } + if ciliumInternalIP.IP == "" { + return v2.NodeAddress{}, fmt.Errorf("ciliumInternalIP not found") + } + return ciliumInternalIP, nil +} + +// To record the 
correspondence between CiliumInternalIp and cilium-agent +func (s *socatMulticast) populateMaps(ctx context.Context, t *check.Test, ciliumPods []corev1.Pod) error { + var wg sync.WaitGroup + errCh := make(chan error, len(ciliumPods)) + wg.Add(len(ciliumPods)) + + for _, ciliumPod := range ciliumPods { + go func(pod corev1.Pod) { + defer wg.Done() + ciliumInternalIP, err := s.getCiliumInternalIP(ctx, t, pod.Spec.NodeName) + if err != nil { + errCh <- err + return + } + ipToPodMap.Store(ciliumInternalIP, pod.Name) + }(ciliumPod) + } + + wg.Wait() + close(errCh) + + var errRet error + for fetchData := range errCh { + errRet = errors.Join(errRet, fetchData) + } + return errRet +} + +// create multicast group and add all cilium nodes to the multicast group for testing +func (s *socatMulticast) addAllNodes(ctx context.Context, t *check.Test) error { + ct := t.Context() + client := ct.K8sClient() + + ciliumPodsList, err := client.ListPods(ctx, ct.Params().CiliumNamespace, metav1.ListOptions{LabelSelector: defaults.AgentPodSelector}) + if err != nil { + return err + } + ciliumPods := ciliumPodsList.Items + + // Create a map of ciliumInternalIPs of all nodes + if err := s.populateMaps(ctx, t, ciliumPods); err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, len(ciliumPods)) + wg.Add(len(ciliumPods)) + + for _, ciliumPod := range ciliumPods { + go func(pod corev1.Pod) { + defer wg.Done() + // If there are not specified multicast group, create it + cmd := []string{"cilium-dbg", "bpf", "multicast", "subscriber", "list", testMulticastGroupIP} + _, stdErr, err := client.ExecInPodWithStderr(ctx, pod.Namespace, pod.Name, defaults.AgentContainerName, cmd) + if err != nil { + if !strings.Contains(stdErr.String(), ebpf.ErrKeyNotExist.Error()) { + errMsg := fmt.Sprintf("Error: %v, Stderr: %s", err, stdErr.String()) + errCh <- errors.New(errMsg) + t.Fatalf("Fatal error occurred while checking multicast group %s in %s", testMulticastGroupIP, pod.Spec.NodeName) + return + } + s.addNodeWithoutGroup(pod.Spec.NodeName) + cmd = []string{"cilium-dbg", "bpf", "multicast", "group", "add", testMulticastGroupIP} + _, stdErr, err := client.ExecInPodWithStderr(ctx, pod.Namespace, pod.Name, defaults.AgentContainerName, cmd) + if err != nil { + errMsg := fmt.Sprintf("Error: %v, Stderr: %s", err, stdErr.String()) + errCh <- errors.New(errMsg) + t.Fatalf("Fatal error occurred while creating multicast group %s in %s", testMulticastGroupIP, pod.Spec.NodeName) + return + } + } + // Add all ciliumInternalIPs of all nodes to the multicast group as subscribers + ipToPodMap.Range(func(ip v2.NodeAddress, podName string) bool { + if ip.IP != "" && pod.Name != podName { // My node itself does not need to be in a multicast group. 
+ cmd = []string{"cilium-dbg", "bpf", "multicast", "subscriber", "add", testMulticastGroupIP, ip.IP} + _, stdErr, err := client.ExecInPodWithStderr(ctx, pod.Namespace, pod.Name, defaults.AgentContainerName, cmd) + if err == nil { + s.addNotSubscribePodAddress(pod.Spec.NodeName, ip) + } else if !strings.Contains(stdErr.String(), ebpf.ErrKeyExist.Error()) { + errMsg := fmt.Sprintf("Error: %v, Stderr: %s", err, stdErr.String()) + errCh <- errors.New(errMsg) + t.Fatalf("Fatal error occurred while adding node %s to multicast group %s in %s", ip.IP, testMulticastGroupIP, pod.Spec.NodeName) + return false // Stop iteration + } + } + return true // Continue iteration + }) + }(ciliumPod) + } + + wg.Wait() + close(errCh) + + var errRet error + for fetchData := range errCh { + errRet = errors.Join(errRet, fetchData) + } + return errRet +} + +// Delete multicast group in designated node +func (s *socatMulticast) delGroup(ctx context.Context, t *check.Test, nodeName string) error { + ct := t.Context() + client := ct.K8sClient() + + ciliumPodsList, err := client.ListPods(ctx, ct.Params().CiliumNamespace, metav1.ListOptions{LabelSelector: defaults.AgentPodSelector}) + if err != nil { + return err + } + ciliumPods := ciliumPodsList.Items + + for _, ciliumPod := range ciliumPods { + if nodeName == ciliumPod.Spec.NodeName { + cmd := []string{"cilium-dbg", "bpf", "multicast", "group", "delete", testMulticastGroupIP} + _, stdErr, err := client.ExecInPodWithStderr(ctx, ciliumPod.Namespace, ciliumPod.Name, defaults.AgentContainerName, cmd) + if err != nil { + if !strings.Contains(stdErr.String(), ebpf.ErrKeyNotExist.Error()) { + errMsg := fmt.Sprintf("Error: %v while deleting Multicast Group for test %s, Stderr: %s", err, testMulticastGroupIP, stdErr.String()) + return errors.New(errMsg) + } + } + break + } + } + return nil +} + +// Delete designated subscriber in designated node +func (s *socatMulticast) delSubscriber(ctx context.Context, t *check.Test, nodeName string, subscriberIP string) error { + ct := t.Context() + client := ct.K8sClient() + + ciliumPodsList, err := client.ListPods(ctx, ct.Params().CiliumNamespace, metav1.ListOptions{LabelSelector: defaults.AgentPodSelector}) + if err != nil { + return err + } + ciliumPods := ciliumPodsList.Items + + for _, ciliumPod := range ciliumPods { + if nodeName == ciliumPod.Spec.NodeName { + cmd := []string{"cilium-dbg", "bpf", "multicast", "subscriber", "delete", testMulticastGroupIP, subscriberIP} + _, stdErr, err := client.ExecInPodWithStderr(ctx, ciliumPod.Namespace, ciliumPod.Name, defaults.AgentContainerName, cmd) + if err != nil { + if !strings.Contains(stdErr.String(), ebpf.ErrKeyNotExist.Error()) { + errMsg := fmt.Sprintf("Error: %v while removing %s from Multicast Group %s Stderr: %s", err, subscriberIP, testMulticastGroupIP, stdErr.String()) + return errors.New(errMsg) + } + } + break + } + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/pod.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/pod.go index 04cc8eabd3..54c6c1a4e4 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/pod.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/pod.go @@ -6,10 +6,12 @@ package tests import ( "context" "fmt" + "regexp" "strconv" "strings" "github.com/cilium/cilium/cilium-cli/connectivity/check" + "github.com/cilium/cilium/cilium-cli/defaults" "github.com/cilium/cilium/cilium-cli/utils/features" ) @@ -253,3 +255,97 @@ func (s *podToPodNoFrag) Run(ctx 
context.Context, t *check.Test) { }) } + +func PodToPodMissingIPCache(opts ...Option) check.Scenario { + options := &labelsOption{} + for _, opt := range opts { + opt(options) + } + return &podToPodMissingIPCache{ + sourceLabels: options.sourceLabels, + destinationLabels: options.destinationLabels, + method: options.method, + } +} + +type podToPodMissingIPCache struct { + sourceLabels map[string]string + destinationLabels map[string]string + method string +} + +func (s *podToPodMissingIPCache) Name() string { + return "pod-to-pod-missing-ipcache" +} + +func (s *podToPodMissingIPCache) Run(ctx context.Context, t *check.Test) { + var i int + ct := t.Context() + + // Temporarily delete echo pods entries from ipcache + ipcacheGetPat := regexp.MustCompile(`identity=(\d+)\s+encryptkey=(\d+)\s+tunnelendpoint=([\d\.]+)`) + for _, echo := range ct.EchoPods() { + echoIP := echo.Address(features.IPFamilyV4) + for _, ciliumPod := range ct.CiliumPods() { + lookupCmd := []string{"cilium", "bpf", "ipcache", "get", echoIP} + output, err := ciliumPod.K8sClient.ExecInPod(ctx, ciliumPod.Pod.Namespace, ciliumPod.Pod.Name, defaults.AgentContainerName, lookupCmd) + if err != nil { + ct.Warnf(`failed to lookup IP cache entry: "%s", %v, "%s"`, lookupCmd, err, output.String()) + continue + } + matches := ipcacheGetPat.FindStringSubmatch(output.String()) + identity := matches[1] + encryptkey := matches[2] + tunnelendpoint := matches[3] + + deleteCmd := []string{"cilium", "bpf", "ipcache", "delete", echoIP + "/32"} + if output, err = ciliumPod.K8sClient.ExecInPod(ctx, ciliumPod.Pod.Namespace, ciliumPod.Pod.Name, defaults.AgentContainerName, deleteCmd); err != nil { + ct.Warnf(`failed to delete IP cache entry: "%s", %v, "%s"`, deleteCmd, err, output.String()) + continue + } + + updateCmd := []string{"cilium", "bpf", "ipcache", "update", echoIP + "/32"} + updateCmd = append(updateCmd, "--tunnelendpoint", tunnelendpoint, "--identity", identity, "--encryptkey", encryptkey) + defer func(ciliumPod check.Pod, updateCmd []string) { + output, err := ciliumPod.K8sClient.ExecInPod(ctx, ciliumPod.Pod.Namespace, ciliumPod.Pod.Name, defaults.AgentContainerName, updateCmd) + if err != nil { + ct.Warnf(`failed to restore IP cache entry: "%s", %v, "%s"`, updateCmd, err, output.String()) + } + }(ciliumPod, updateCmd) + } + } + + for _, client := range ct.ClientPods() { + if !hasAllLabels(client, s.sourceLabels) { + continue + } + for _, echo := range ct.EchoPods() { + if !hasAllLabels(echo, s.destinationLabels) { + continue + } + + // Skip if echo pod is on the same node as client + if echo.Pod.Spec.NodeName == client.Pod.Spec.NodeName { + continue + } + + t.ForEachIPFamily(func(ipFam features.IPFamily) { + if ipFam == features.IPFamilyV6 { + // encryption-strict-mode-cidr only accepts an IPv4 CIDR + return + } + t.NewAction(s, fmt.Sprintf("curl-%s-%d", ipFam, i), &client, echo, ipFam).Run(func(a *check.Action) { + a.ExecInPod(ctx, ct.CurlCommand(echo, ipFam)) + + a.ValidateFlows(ctx, client, a.GetEgressRequirements(check.FlowParameters{})) + a.ValidateFlows(ctx, echo, a.GetIngressRequirements(check.FlowParameters{})) + + a.ValidateMetrics(ctx, echo, a.GetIngressMetricsRequirements()) + a.ValidateMetrics(ctx, echo, a.GetEgressMetricsRequirements()) + }) + }) + + i++ + } + } +} diff --git a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/service.go b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/service.go index ee516629e5..74ffa2fd8f 100644 --- 
a/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/service.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/connectivity/tests/service.go @@ -12,7 +12,6 @@ import ( "github.com/cilium/cilium/cilium-cli/connectivity/check" "github.com/cilium/cilium/cilium-cli/utils/features" - "github.com/cilium/cilium/pkg/versioncheck" ) // PodToService sends an HTTP request from all client Pods @@ -228,13 +227,6 @@ func curlNodePort(ctx context.Context, s check.Scenario, t *check.Test, } } - // Skip IPv6 requests when running on <1.14.0 Cilium with CNPs - if features.GetIPFamily(addr.Address) == features.IPFamilyV6 && - versioncheck.MustCompile("<1.14.0")(t.Context().CiliumVersion) && - (len(t.CiliumNetworkPolicies()) > 0 || len(t.KubernetesNetworkPolicies()) > 0) { - continue - } - // Manually construct an HTTP endpoint to override the destination IP // and port of the request. ep := check.HTTPEndpoint(name, fmt.Sprintf("%s://%s:%d%s", svc.Scheme(), addr.Address, np, svc.Path())) diff --git a/vendor/github.com/cilium/cilium/cilium-cli/defaults/defaults.go b/vendor/github.com/cilium/cilium/cilium-cli/defaults/defaults.go index c596c70eb4..4248db619e 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/defaults/defaults.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/defaults/defaults.go @@ -76,6 +76,8 @@ const ( ConnectivityTestConnDisruptImage = "quay.io/cilium/test-connection-disruption:v0.0.14@sha256:c3fd56e326ae16f6cb63dbb2e26b4e47ec07a123040623e11399a7fe1196baa0" // renovate: datasource=docker ConnectivityTestFRRImage = "quay.io/frrouting/frr:10.1.1@sha256:7c7901eb5611f12634395c949e59663e154b37cf006f32c7f4c8650884cdc0b1" + // renovate: datasource=docker + ConnectivityTestSocatImage = "docker.io/alpine/socat:1.8.0.0@sha256:a6be4c0262b339c53ddad723cdd178a1a13271e1137c65e27f90a08c16de02b8" ConfigMapName = "cilium-config" @@ -87,7 +89,7 @@ const ( FlowWaitTimeout = 10 * time.Second FlowRetryInterval = 500 * time.Millisecond - PolicyWaitTimeout = 15 * time.Second + PolicyWaitTimeout = 30 * time.Second ConnectRetry = 3 ConnectRetryDelay = 3 * time.Second @@ -158,6 +160,7 @@ var ( "Host datapath not ready", "Unknown ICMPv4 code", "Forbidden ICMPv6 message", + "No egress gateway found", } ExpectedXFRMErrors = []string{ diff --git a/vendor/github.com/cilium/cilium/cilium-cli/hubble/hubble.go b/vendor/github.com/cilium/cilium/cilium-cli/hubble/hubble.go index 911b0f688a..6b5cd93e7e 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/hubble/hubble.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/hubble/hubble.go @@ -9,18 +9,12 @@ import ( "io" "helm.sh/helm/v3/pkg/cli/values" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/cilium/cilium/cilium-cli/defaults" "github.com/cilium/cilium/cilium-cli/internal/helm" "github.com/cilium/cilium/cilium-cli/k8s" ) -type k8sHubbleImplementation interface { - GetService(ctx context.Context, namespace, name string, opts metav1.GetOptions) (*corev1.Service, error) -} - type Parameters struct { Namespace string Relay bool @@ -28,7 +22,6 @@ type Parameters struct { UI bool UIPortForward int Writer io.Writer - Context string // Only for 'kubectl' pass-through commands // UIOpenBrowser will automatically open browser if true UIOpenBrowser bool diff --git a/vendor/github.com/cilium/cilium/cilium-cli/hubble/relay.go b/vendor/github.com/cilium/cilium/cilium-cli/hubble/relay.go index 137716f04d..0de0af2edf 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/hubble/relay.go +++ 
b/vendor/github.com/cilium/cilium/cilium-cli/hubble/relay.go @@ -7,28 +7,16 @@ import ( "context" "fmt" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/cilium/cilium/cilium-cli/internal/utils" + "github.com/cilium/cilium/cilium-cli/k8s" ) -func (p *Parameters) RelayPortForwardCommand(ctx context.Context, client k8sHubbleImplementation) error { - relaySvc, err := client.GetService(ctx, p.Namespace, "hubble-relay", metav1.GetOptions{}) +func (p *Parameters) RelayPortForwardCommand(ctx context.Context, k8sClient *k8s.Client) error { + // default to first port configured on the service when svcPort is set to 0 + res, err := k8sClient.PortForwardService(ctx, p.Namespace, "hubble-relay", int32(p.PortForward), 0) if err != nil { - return err + return fmt.Errorf("failed to port forward: %w", err) } - - args := []string{ - "port-forward", - "-n", p.Namespace, - "svc/hubble-relay", - "--address", "127.0.0.1", - fmt.Sprintf("%d:%d", p.PortForward, relaySvc.Spec.Ports[0].Port)} - - if p.Context != "" { - args = append([]string{"--context", p.Context}, args...) - } - - _, err = utils.Exec(p, "kubectl", args...) - return err + p.Log("ℹ️ Hubble Relay is available at 127.0.0.1:%d", res.ForwardedPort.Local) + <-ctx.Done() + return nil } diff --git a/vendor/github.com/cilium/cilium/cilium-cli/hubble/ui.go b/vendor/github.com/cilium/cilium/cilium-cli/hubble/ui.go index 3556d43eff..41b8133e2f 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/hubble/ui.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/hubble/ui.go @@ -4,42 +4,33 @@ package hubble import ( + "context" "fmt" "io" - "time" "github.com/pkg/browser" - "github.com/cilium/cilium/cilium-cli/internal/utils" + "github.com/cilium/cilium/cilium-cli/k8s" ) -func (p *Parameters) UIPortForwardCommand() error { - args := []string{ - "port-forward", - "-n", p.Namespace, - "svc/hubble-ui", - "--address", "127.0.0.1", - fmt.Sprintf("%d:80", p.UIPortForward)} - - if p.Context != "" { - args = append([]string{"--context", p.Context}, args...) +func (p *Parameters) UIPortForwardCommand(ctx context.Context, k8sClient *k8s.Client) error { + // default to first port configured on the service when svcPort is set to 0 + res, err := k8sClient.PortForwardService(ctx, p.Namespace, "hubble-ui", int32(p.UIPortForward), 0) + if err != nil { + return fmt.Errorf("failed to port forward: %w", err) } - go func() { - time.Sleep(5 * time.Second) - url := fmt.Sprintf("http://localhost:%d", p.UIPortForward) - - if p.UIOpenBrowser { - // avoid cluttering stdout/stderr when opening the browser - browser.Stdout = io.Discard - browser.Stderr = io.Discard - p.Log("ℹ️ Opening %q in your browser...", url) - browser.OpenURL(url) - } else { - p.Log("ℹ️ Hubble UI is available at %q", url) - } - }() + url := fmt.Sprintf("http://localhost:%d", res.ForwardedPort.Local) + if p.UIOpenBrowser { + // avoid cluttering stdout/stderr when opening the browser + browser.Stdout = io.Discard + browser.Stderr = io.Discard + p.Log("ℹ️ Opening %q in your browser...", url) + browser.OpenURL(url) + } else { + p.Log("ℹ️ Hubble UI is available at %q", url) + } - _, err := utils.Exec(p, "kubectl", args...) 
- return err + <-ctx.Done() + return nil } diff --git a/vendor/github.com/cilium/cilium/cilium-cli/k8s/client.go b/vendor/github.com/cilium/cilium/cilium-cli/k8s/client.go index abc9604bed..c44296e3c8 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/k8s/client.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/k8s/client.go @@ -23,6 +23,7 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -39,7 +40,7 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" - _ "k8s.io/client-go/plugin/pkg/client/auth" // Register all auth providers (azure, gcp, oidc, openstack, ..). + _ "k8s.io/client-go/plugin/pkg/client/auth" // Register all auth providers (azure, gcp, oidc, openstack, ..) "k8s.io/client-go/rest" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/transport/spdy" @@ -53,6 +54,12 @@ import ( "github.com/cilium/cilium/pkg/versioncheck" ) +func init() { + // Register the Cilium types in the default scheme. + _ = ciliumv2.AddToScheme(scheme.Scheme) + _ = ciliumv2alpha1.AddToScheme(scheme.Scheme) +} + type Client struct { Clientset kubernetes.Interface ExtensionClientset apiextensionsclientset.Interface // k8s api extension needed to retrieve CRDs @@ -66,10 +73,6 @@ type Client struct { } func NewClient(contextName, kubeconfig, ciliumNamespace string) (*Client, error) { - // Register the Cilium types in the default scheme. - _ = ciliumv2.AddToScheme(scheme.Scheme) - _ = ciliumv2alpha1.AddToScheme(scheme.Scheme) - restClientGetter := genericclioptions.ConfigFlags{ Context: &contextName, KubeConfig: &kubeconfig, @@ -311,11 +314,12 @@ func (c *Client) PodLogs(namespace, name string, opts *corev1.PodLogOptions) *re return c.Clientset.CoreV1().Pods(namespace).GetLogs(name, opts) } -func (c *Client) CiliumLogs(ctx context.Context, namespace, pod string, since time.Time) (string, error) { +func (c *Client) CiliumLogs(ctx context.Context, namespace, pod string, since time.Time, previous bool) (string, error) { opts := &corev1.PodLogOptions{ Container: defaults.AgentContainerName, Timestamps: true, SinceTime: &metav1.Time{Time: since}, + Previous: previous, } req := c.PodLogs(namespace, pod, opts) podLogs, err := req.Stream(ctx) @@ -353,8 +357,9 @@ func (c *Client) ExecInPod(ctx context.Context, namespace, pod, container string Container: container, Command: command, }) + if err != nil { - return result.Stdout, err + return result.Stdout, fmt.Errorf("%w: %q", err, result.Stderr.String()) } if errString := result.Stderr.String(); errString != "" { @@ -899,6 +904,10 @@ func (c *Client) ListEndpoints(ctx context.Context, o metav1.ListOptions) (*core return c.Clientset.CoreV1().Endpoints(corev1.NamespaceAll).List(ctx, o) } +func (c *Client) ListEndpointSlices(ctx context.Context, o metav1.ListOptions) (*discoveryv1.EndpointSliceList, error) { + return c.Clientset.DiscoveryV1().EndpointSlices(corev1.NamespaceAll).List(ctx, o) +} + func (c *Client) ListIngressClasses(ctx context.Context, o metav1.ListOptions) (*networkingv1.IngressClassList, error) { return c.Clientset.NetworkingV1().IngressClasses().List(ctx, o) } @@ -1094,3 +1103,64 @@ func (c *Client) CreateEphemeralContainer(ctx context.Context, pod *corev1.Pod, ctx, pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, 
"ephemeralcontainers", ) } + +type Object interface { + metav1.Object + runtime.Object +} + +// ApplyGeneric uses server-side apply to merge changes to an arbitrary object. +// Returns the applied object. +func (c *Client) ApplyGeneric(ctx context.Context, obj Object) (*unstructured.Unstructured, error) { + gvk, resource, err := c.Describe(obj) + if err != nil { + return nil, fmt.Errorf("could not get Kubernetes API information for %s/%s: %w", obj.GetNamespace(), obj.GetName(), err) + } + + // Now, convert the object to an Unstructured + u, ok := obj.(*unstructured.Unstructured) + if !ok { + b, err := json.Marshal(obj) + if err != nil { + return nil, fmt.Errorf("failed to convert to unstructured (marshal): %w", err) + } + u = &unstructured.Unstructured{} + if err := json.Unmarshal(b, u); err != nil { + return nil, fmt.Errorf("failed to convert to unstructured (unmarshal): %w", err) + } + } + + // Dragons: If we're passed a non-Unstructured object (e.g. v1.ConfigMap), it won't have + // the GVK set necessarily. So, use the retrieved GVK from the schema and add it. + // This is a no-op for Unstructured objects. + // TODO: use a proper codec + serializer + u.GetObjectKind().SetGroupVersionKind(gvk) + + // clear ManagedFields; it is not allowed to specify them in a Patch + u.SetManagedFields(nil) + + dynamicClient := c.DynamicClientset.Resource(resource).Namespace(obj.GetNamespace()) + return dynamicClient.Apply(ctx, obj.GetName(), u, metav1.ApplyOptions{Force: true, FieldManager: "cilium-cli"}) +} + +func (c *Client) GetGeneric(ctx context.Context, namespace, name string, obj Object) (*unstructured.Unstructured, error) { + _, resource, err := c.Describe(obj) + if err != nil { + return nil, fmt.Errorf("could not get Kubernetes API information for %s/%s: %w", obj.GetNamespace(), obj.GetName(), err) + } + + dynamicClient := c.DynamicClientset.Resource(resource).Namespace(namespace) + + return dynamicClient.Get(ctx, name, metav1.GetOptions{}) +} + +func (c *Client) DeleteGeneric(ctx context.Context, obj Object) error { + _, resource, err := c.Describe(obj) + if err != nil { + return fmt.Errorf("could not get Kubernetes API information for %s/%s: %w", obj.GetNamespace(), obj.GetName(), err) + } + + dynamicClient := c.DynamicClientset.Resource(resource).Namespace(obj.GetNamespace()) + + return dynamicClient.Delete(ctx, obj.GetName(), metav1.DeleteOptions{}) +} diff --git a/vendor/github.com/cilium/cilium/cilium-cli/k8s/dialer.go b/vendor/github.com/cilium/cilium/cilium-cli/k8s/dialer.go index c1b0325cec..c85f85d4f8 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/k8s/dialer.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/k8s/dialer.go @@ -5,99 +5,21 @@ package k8s import ( "context" - "io" - "net/http" - "strings" - "k8s.io/client-go/tools/portforward" - "k8s.io/client-go/transport/spdy" + "github.com/cilium/cilium/pkg/k8s" ) -// ForwardedPort holds the remote and local mapped port. -type ForwardedPort struct { - Local uint16 - Remote uint16 -} - -// PortForwardParameters are the needed parameters to call PortForward. 
-// Ports value follow the kubectl syntax: : -// 5000 means 5000:5000 listening on 5000 port locally, forwarding to 5000 in the pod -// 8888:5000 means listening on 8888 port locally, forwarding to 5000 in the pod -// 0:5000 means listening on a random port locally, forwarding to 5000 in the pod -// :5000 means listening on a random port locally, forwarding to 5000 in the pod -type PortForwardParameters struct { - Namespace string - Pod string - Ports []string - Addresses []string - OutWriters OutWriters -} - -// OutWriters holds the two io.Writer needed for the port forward -// one for the output and for the errors. -type OutWriters struct { - Out io.Writer - ErrOut io.Writer -} - -// PortForwardResult are the ports that have been forwarded. -type PortForwardResult struct { - ForwardedPorts []ForwardedPort -} - // PortForward executes in a goroutine a port forward command. // To stop the port-forwarding, use the context by cancelling it -func (c *Client) PortForward(ctx context.Context, p PortForwardParameters) (*PortForwardResult, error) { - req := c.Clientset.CoreV1().RESTClient().Post().Namespace(p.Namespace). - Resource("pods").Name(p.Pod).SubResource(strings.ToLower("PortForward")) - - roundTripper, upgrader, err := spdy.RoundTripperFor(c.Config) - if err != nil { - return nil, err - } - - dialer := spdy.NewDialer(upgrader, &http.Client{Transport: roundTripper}, http.MethodPost, req.URL()) - stopChan, readyChan := make(chan struct{}, 1), make(chan struct{}, 1) - if len(p.Addresses) == 0 { - p.Addresses = []string{"localhost"} - } - - pw, err := portforward.NewOnAddresses(dialer, p.Addresses, p.Ports, stopChan, readyChan, p.OutWriters.Out, p.OutWriters.ErrOut) - if err != nil { - return nil, err - } - - errChan := make(chan error, 1) - go func() { - if err := pw.ForwardPorts(); err != nil { - errChan <- err - } - }() - - go func() { - <-ctx.Done() - close(stopChan) - }() - - select { - case <-pw.Ready: - case <-ctx.Done(): - return nil, ctx.Err() - case err := <-errChan: - return nil, err - } - - ports, err := pw.GetPorts() - if err != nil { - return nil, err - } - - forwardedPorts := make([]ForwardedPort, 0, len(ports)) - for _, port := range ports { - forwardedPorts = append(forwardedPorts, ForwardedPort{port.Local, port.Remote}) - } +func (c *Client) PortForward(ctx context.Context, p k8s.PortForwardParameters) (*k8s.PortForwardResult, error) { + return k8s.NewPortForwarder(c.Clientset, c.Config).PortForward(ctx, p) +} - return &PortForwardResult{ - ForwardedPorts: forwardedPorts, - }, nil +// PortForwardService executes in a goroutine a port forward command towards one of the pod behind a +// service. If `localPort` is 0, a random port is selected. If `svcPort` is 0, uses the first port +// configured on the service. +// +// To stop the port-forwarding, use the context by cancelling it. 
+func (c *Client) PortForwardService(ctx context.Context, namespace, name string, localPort, svcPort int32) (*k8s.PortForwardServiceResult, error) { + return k8s.NewPortForwarder(c.Clientset, c.Config).PortForwardService(ctx, namespace, name, localPort, svcPort) } diff --git a/vendor/github.com/cilium/cilium/cilium-cli/k8s/helpers.go b/vendor/github.com/cilium/cilium/cilium-cli/k8s/helpers.go index 1c3af9b0d0..c6782cc31c 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/k8s/helpers.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/k8s/helpers.go @@ -4,10 +4,15 @@ package k8s import ( + "fmt" + corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" ) func NewServiceAccount(name string) *corev1.ServiceAccount { @@ -72,3 +77,27 @@ func NewTLSSecret(name, namespace string, data map[string][]byte) *corev1.Secret Type: corev1.SecretTypeTLS, } } + +// Describe returns the Kubernetes type and resource information for an object +func (c *Client) Describe(obj runtime.Object) (gvk schema.GroupVersionKind, resource schema.GroupVersionResource, err error) { + // first, determine the GroupVersionKind and Resource for the given object + gvks, _, _ := scheme.Scheme.ObjectKinds(obj) + if len(gvks) != 1 { + err = fmt.Errorf("Could not get GroupVersionKind") + return + } + + gvk = gvks[0] + + // Convert the GroupVersionKind in to a Resource + restMapper, err := c.RESTClientGetter.ToRESTMapper() + if err != nil { + return + } + rm, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return + } + resource = rm.Resource + return +} diff --git a/vendor/github.com/cilium/cilium/cilium-cli/multicast/multicast.go b/vendor/github.com/cilium/cilium/cilium-cli/multicast/multicast.go index f5c30dfc12..afc01645a4 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/multicast/multicast.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/multicast/multicast.go @@ -18,6 +18,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/cilium/ebpf" + "github.com/cilium/cilium/cilium-cli/defaults" "github.com/cilium/cilium/cilium-cli/k8s" v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" @@ -29,15 +31,13 @@ import ( // in cilium/cilium repository. 
const ( - padding = 3 - minWidth = 5 - paddingChar = ' ' - alreadyExistMsg = "already exists" - notExistMsg = "does not exist" + padding = 3 + minWidth = 5 + paddingChar = ' ' ) var ( - errMissingGroup = errors.New(notExistMsg) + errMissingGroup = ebpf.ErrKeyNotExist ) type Multicast struct { @@ -236,7 +236,7 @@ func (m *Multicast) getGroupForSubscriberList(ctx context.Context, pod corev1.Po cmd := []string{"cilium-dbg", "bpf", "multicast", "subscriber", "list", target, "-o", "json"} outputByte, stdErr, err := m.client.ExecInPodWithStderr(ctx, pod.Namespace, pod.Name, defaults.AgentContainerName, cmd) if err != nil { - if strings.Contains(stdErr.String(), notExistMsg) { + if strings.Contains(stdErr.String(), ebpf.ErrKeyNotExist.Error()) { fmt.Fprintf(m.params.Writer, "Multicast group %s does not exist in %s\n", target, pod.Spec.NodeName) return nil, errMissingGroup } @@ -465,7 +465,7 @@ func (m *Multicast) AddAllNodes() error { cmd := []string{"cilium-dbg", "bpf", "multicast", "subscriber", "list", m.params.MulticastGroupIP} _, stdErr, err := m.client.ExecInPodWithStderr(ctx, pod.Namespace, pod.Name, defaults.AgentContainerName, cmd) if err != nil { - if !strings.Contains(stdErr.String(), notExistMsg) { + if !strings.Contains(stdErr.String(), ebpf.ErrKeyNotExist.Error()) { errMsg := fmt.Sprintf("Error: %v, Stderr: %s", err, stdErr.String()) errCh <- errors.New(errMsg) fmt.Fprintf(m.params.Writer, "Fatal error occurred while checking multicast group %s in %s\n", m.params.MulticastGroupIP, pod.Spec.NodeName) @@ -492,7 +492,7 @@ func (m *Multicast) AddAllNodes() error { if err == nil { cnt++ nodeLists = append(nodeLists, ipToNodeMap[ip]) - } else if !strings.Contains(stdErr.String(), alreadyExistMsg) { + } else if !strings.Contains(stdErr.String(), ebpf.ErrKeyExist.Error()) { errMsg := fmt.Sprintf("Error: %v, Stderr: %s", err, stdErr.String()) errCh <- errors.New(errMsg) fmt.Fprintf(m.params.Writer, "Unable to add node %s to multicast group %s in %s by fatal error\n", ip.IP, m.params.MulticastGroupIP, pod.Spec.NodeName) @@ -558,7 +558,7 @@ func (m *Multicast) DelAllNodes() error { cmd := []string{"cilium-dbg", "bpf", "multicast", "group", "delete", m.params.MulticastGroupIP} _, stdErr, err := m.client.ExecInPodWithStderr(ctx, pod.Namespace, pod.Name, defaults.AgentContainerName, cmd) if err != nil { - if !strings.Contains(stdErr.String(), notExistMsg) { + if !strings.Contains(stdErr.String(), ebpf.ErrKeyNotExist.Error()) { errMsg := fmt.Sprintf("Error: %v, Stderr: %s", err, stdErr.String()) errCh <- errors.New(errMsg) fmt.Fprintf(m.params.Writer, "Unable to delete multicast group %s in %s by fatal error\n", m.params.MulticastGroupIP, pod.Spec.NodeName) diff --git a/vendor/github.com/cilium/cilium/cilium-cli/status/k8s.go b/vendor/github.com/cilium/cilium/cilium-cli/status/k8s.go index 31a4fbf300..e1de33cdcf 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/status/k8s.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/status/k8s.go @@ -9,6 +9,7 @@ import ( "fmt" "net/http" "os" + "sort" "strings" "sync" "time" @@ -25,6 +26,7 @@ import ( "github.com/cilium/cilium/api/v1/models" "github.com/cilium/cilium/cilium-cli/defaults" "github.com/cilium/cilium/cilium-cli/k8s" + "github.com/cilium/cilium/pkg/annotation" ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" ) @@ -69,11 +71,12 @@ type k8sImplementation interface { CiliumStatus(ctx context.Context, namespace, pod string) (*models.StatusResponse, error) KVStoreMeshStatus(ctx context.Context, namespace, pod string) 
([]*models.RemoteCluster, error) CiliumDbgEndpoints(ctx context.Context, namespace, pod string) ([]*models.Endpoint, error) + GetConfigMap(ctx context.Context, namespace, name string, opts metav1.GetOptions) (*corev1.ConfigMap, error) GetDaemonSet(ctx context.Context, namespace, name string, options metav1.GetOptions) (*appsv1.DaemonSet, error) GetDeployment(ctx context.Context, namespace, name string, options metav1.GetOptions) (*appsv1.Deployment, error) ListPods(ctx context.Context, namespace string, options metav1.ListOptions) (*corev1.PodList, error) ListCiliumEndpoints(ctx context.Context, namespace string, options metav1.ListOptions) (*ciliumv2.CiliumEndpointList, error) - CiliumLogs(ctx context.Context, namespace, pod string, since time.Time) (string, error) + CiliumLogs(ctx context.Context, namespace, pod string, since time.Time, previous bool) (string, error) } func NewK8sStatusCollector(client k8sImplementation, params K8sStatusParameters) (*K8sStatusCollector, error) { @@ -308,6 +311,20 @@ func (k *K8sStatusCollector) podStatus(ctx context.Context, status *Status, name return nil } +func (k *K8sStatusCollector) ciliumConfigAnnotations(ctx context.Context, status *Status) error { + cm, err := k.client.GetConfigMap(ctx, k.params.Namespace, defaults.ConfigMapName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("unable to retrieve ConfigMap %q: %w", defaults.ConfigMapName, err) + } + for k, v := range cm.Annotations { + if strings.HasPrefix(k, annotation.ConfigPrefix) { + status.ConfigErrors = append(status.ConfigErrors, v) + } + } + sort.Strings(status.ConfigErrors) + return nil +} + func (s K8sStatusParameters) waitTimeout() time.Duration { if s.WaitDuration != time.Duration(0) { return s.WaitDuration @@ -350,11 +367,14 @@ func (k *K8sStatusCollector) Status(ctx context.Context) (*Status, error) { for { select { case <-ctx.Done(): + if errors.Is(ctx.Err(), context.Canceled) { + return mostRecentStatus, fmt.Errorf("wait canceled, cilium agent container has crashed or was terminated: %w", ctx.Err()) + } return mostRecentStatus, fmt.Errorf("timeout while waiting for status to become successful: %w", ctx.Err()) default: } - s := k.status(ctx) + s := k.status(ctx, cancel) // We collect the most recent status that even if the last status call // fails, we can still display the most recent status if s != nil { @@ -399,7 +419,7 @@ type statusTask struct { task func(_ context.Context) error } -func (k *K8sStatusCollector) status(ctx context.Context) *Status { +func (k *K8sStatusCollector) status(ctx context.Context, cancel context.CancelFunc) *Status { status := newStatus() tasks := []statusTask{ { @@ -565,6 +585,18 @@ func (k *K8sStatusCollector) status(ctx context.Context) *Status { return nil }, }, + { + name: defaults.ConfigMapName, + task: func(_ context.Context) error { + err := k.ciliumConfigAnnotations(ctx, status) + if err != nil { + status.mutex.Lock() + defer status.mutex.Unlock() + status.CollectionError(err) + } + return nil + }, + }, } tasks = append(tasks, statusTask{ @@ -601,6 +633,7 @@ func (k *K8sStatusCollector) status(ctx context.Context) *Status { var s *models.StatusResponse var eps []*models.Endpoint var err, epserr error + var isTerminated bool if containerStatus != nil && containerStatus.State.Running != nil { // if container is running, execute "cilium status" in the container and parse the result @@ -616,6 +649,7 @@ func (k *K8sStatusCollector) status(ctx context.Context) *Status { if containerStatus != nil { if containerStatus.State.Waiting != nil 
&& containerStatus.State.Waiting.Reason == "CrashLoopBackOff" { desc = "is in CrashLoopBackOff" + isTerminated = true } if containerStatus.LastTerminationState.Terminated != nil { terminated := containerStatus.LastTerminationState.Terminated @@ -625,20 +659,27 @@ func (k *K8sStatusCollector) status(ctx context.Context) *Status { // either from container message or a separate logs request dyingGasp := "" if terminated.Message != "" { - dyingGasp = strings.TrimSpace(terminated.Message) + lastLog = strings.TrimSpace(terminated.Message) } else { agentLogsOnce.Do(func() { // in a sync.Once so we don't waste time retrieving lots of logs - logs, err := k.client.CiliumLogs(ctx, pod.Namespace, pod.Name, terminated.FinishedAt.Time.Add(-2*time.Minute)) + var getPrevious bool + if containerStatus.RestartCount > 0 { + getPrevious = true + } + logs, err := k.client.CiliumLogs(ctx, pod.Namespace, pod.Name, terminated.FinishedAt.Time.Add(-2*time.Minute), getPrevious) if err == nil && logs != "" { dyingGasp = strings.TrimSpace(logs) } }) } - // Only output the last line + // output the last few log lines if available if dyingGasp != "" { lines := strings.Split(dyingGasp, "\n") - lastLog = lines[len(lines)-1] + lastLog = "" + for i := 0; i < min(len(lines), 50); i++ { + lastLog += fmt.Sprintf("\n%s", lines[i]) + } } } } @@ -653,6 +694,11 @@ func (k *K8sStatusCollector) status(ctx context.Context) *Status { status.CiliumStatus[pod.Name] = s status.CiliumEndpoints[pod.Name] = eps + // avoid repeating the status check if the container is in a terminal state + if isTerminated { + cancel() + } + return nil }, }) diff --git a/vendor/github.com/cilium/cilium/cilium-cli/status/status.go b/vendor/github.com/cilium/cilium/cilium-cli/status/status.go index 9ef337bd27..685c3dd5a1 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/status/status.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/status/status.go @@ -140,6 +140,8 @@ type Status struct { // For Helm mode only. 
HelmChartVersion string `json:"helm_chart_version,omitempty"` + ConfigErrors []string `json:"config_errors,omitempty"` + mutex *lock.Mutex } @@ -433,6 +435,14 @@ func (s *Status) Format() string { } } + header = "Configuration:" + for _, msg := range s.ConfigErrors { + for _, line := range strings.Split(msg, "\n") { + fmt.Fprintf(w, "%s\t \t%s\n", header, line) + header = "" + } + } + w.Flush() return buf.String() diff --git a/vendor/github.com/cilium/cilium/cilium-cli/sysdump/client.go b/vendor/github.com/cilium/cilium/cilium-cli/sysdump/client.go index b2eb592e0e..be7f8459f7 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/sysdump/client.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/sysdump/client.go @@ -12,6 +12,7 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -71,6 +72,7 @@ type KubernetesClient interface { ListDaemonSet(ctx context.Context, namespace string, o metav1.ListOptions) (*appsv1.DaemonSetList, error) ListEvents(ctx context.Context, o metav1.ListOptions) (*corev1.EventList, error) ListEndpoints(ctx context.Context, o metav1.ListOptions) (*corev1.EndpointsList, error) + ListEndpointSlices(ctx context.Context, o metav1.ListOptions) (*discoveryv1.EndpointSliceList, error) ListIngressClasses(ctx context.Context, o metav1.ListOptions) (*networkingv1.IngressClassList, error) ListIngresses(ctx context.Context, o metav1.ListOptions) (*networkingv1.IngressList, error) ListNamespaces(ctx context.Context, o metav1.ListOptions) (*corev1.NamespaceList, error) diff --git a/vendor/github.com/cilium/cilium/cilium-cli/sysdump/constants.go b/vendor/github.com/cilium/cilium/cilium-cli/sysdump/constants.go index fa537e40d5..5da479d40e 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/sysdump/constants.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/sysdump/constants.go @@ -97,6 +97,7 @@ const ( hubbleGenerateCertsCronJobFileName = "hubble-generate-certs-cronjob-.yaml" hubbleCertificatesFileName = "hubble-certificates-.yaml" kubernetesEndpointsFileName = "k8s-endpoints-.yaml" + kubernetesEndpointSlicesFileName = "k8s-endpointslices-.yaml" kubernetesEventsFileName = "k8s-events-.yaml" kubernetesEventsTableFileName = "k8s-events-.html" kubernetesLeasesFileName = "k8s-leases-.yaml" diff --git a/vendor/github.com/cilium/cilium/cilium-cli/sysdump/sysdump.go b/vendor/github.com/cilium/cilium/cilium-cli/sysdump/sysdump.go index 4e45333a91..408f0a70d2 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/sysdump/sysdump.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/sysdump/sysdump.go @@ -163,6 +163,8 @@ type Collector struct { NodeList []string // CiliumPods is a list of Cilium agent pods running on nodes in NodeList. CiliumPods []*corev1.Pod + // CiliumOperatorPods is the list of Cilium operator pods. + CiliumOperatorPods []*corev1.Pod // CiliumConfigMap is a pointer to cilium-config ConfigMap. CiliumConfigMap *corev1.ConfigMap // additionalTasks keeps track of additional tasks added via AddTasks. 
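For context, the `ListEndpointSlices` method added above to the k8s client and to the sysdump `KubernetesClient` interface (and consumed by the "Collecting Kubernetes endpointslices" task that follows) is a thin wrapper over client-go's DiscoveryV1 API listing across all namespaces. A minimal standalone sketch of that underlying call — assuming a hypothetical kubeconfig path rather than cilium-cli's own client plumbing — could look like:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig path; cilium-cli resolves this through its own flags.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// List EndpointSlices in every namespace, as the new helper does with NamespaceAll.
	slices, err := clientset.DiscoveryV1().EndpointSlices(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, s := range slices.Items {
		fmt.Printf("%s/%s: %d endpoints\n", s.Namespace, s.Name, len(s.Endpoints))
	}
}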
@@ -298,6 +300,16 @@ func NewCollector( } } + if c.Options.CiliumOperatorNamespace != "" { + pods, err := c.Client.ListPods(context.Background(), c.Options.CiliumOperatorNamespace, metav1.ListOptions{ + LabelSelector: c.Options.CiliumOperatorLabelSelector, + }) + if err != nil { + return nil, fmt.Errorf("failed to get Cilium operator pods: %w", err) + } + c.CiliumOperatorPods = AllPods(pods) + } + if err := hooks.AddSysdumpTasks(c); err != nil { return nil, fmt.Errorf("failed to add custom sysdump tasks: %w", err) } @@ -519,6 +531,20 @@ func (c *Collector) Run() error { return nil }, }, + { + Description: "Collecting Kubernetes endpointslices", + Quick: true, + Task: func(ctx context.Context) error { + v, err := c.Client.ListEndpointSlices(ctx, metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("failed to collect Kubernetes endpointslices: %w", err) + } + if err := c.WriteYAML(kubernetesEndpointSlicesFileName, v); err != nil { + return fmt.Errorf("failed to collect Kubernetes endpointslices: %w", err) + } + return nil + }, + }, { Description: "Collecting Kubernetes leases", Quick: true, @@ -1002,12 +1028,8 @@ func (c *Collector) Run() error { CreatesSubtasks: true, Description: "Collecting the Cilium operator metrics", Quick: false, - Task: func(ctx context.Context) error { - pods, err := c.Client.ListPods(ctx, c.Options.CiliumOperatorNamespace, metav1.ListOptions{LabelSelector: defaults.OperatorPodSelector}) - if err != nil { - return fmt.Errorf("failed to get the Cilium operator pods: %w", err) - } - err = c.SubmitMetricsSubtask(pods, defaults.OperatorContainerName, defaults.OperatorMetricsPortName) + Task: func(_ context.Context) error { + err := c.SubmitMetricsSubtask(c.CiliumOperatorPods, defaults.OperatorContainerName, defaults.OperatorMetricsPortName) if err != nil { return fmt.Errorf("failed to collect the Cilium operator metrics: %w", err) } @@ -1020,11 +1042,13 @@ func (c *Collector) Run() error { Quick: false, Task: func(ctx context.Context) error { // clustermesh-apiserver runs in the same namespace as operator - pods, err := c.Client.ListPods(ctx, c.Options.CiliumOperatorNamespace, metav1.ListOptions{LabelSelector: defaults.ClusterMeshPodSelector}) + p, err := c.Client.ListPods(ctx, c.Options.CiliumOperatorNamespace, metav1.ListOptions{LabelSelector: defaults.ClusterMeshPodSelector}) if err != nil { return fmt.Errorf("failed to get the Cilium clustermesh pods: %w", err) } + pods := AllPods(p) + err = c.submitClusterMeshAPIServerDbgTasks(pods) if err != nil { return fmt.Errorf("failed to collect the Cilium clustermesh debug information: %w", err) @@ -1047,13 +1071,13 @@ func (c *Collector) Run() error { defaults.ClusterMeshContainerName: ciliumdef.GopsPortApiserver, defaults.ClusterMeshKVStoreMeshContainerName: ciliumdef.GopsPortKVStoreMesh, } { - err = c.SubmitGopsSubtasks(AllPods(pods), container) + err = c.SubmitGopsSubtasks(pods, container) if err != nil { return fmt.Errorf("failed to collect the Cilium clustermesh gops stats: %w", err) } if c.Options.Profiling { - err = c.SubmitStreamProfilingGopsSubtasks(AllPods(pods), container, port) + err = c.SubmitStreamProfilingGopsSubtasks(pods, container, port) if err != nil { return fmt.Errorf("failed to collect the Cilium clustermesh profiles: %w", err) } @@ -1126,14 +1150,8 @@ func (c *Collector) Run() error { Description: "Collecting gops stats from Cilium-operator pods", Quick: true, Task: func(ctx context.Context) error { - p, err := c.Client.ListPods(ctx, c.Options.CiliumNamespace, metav1.ListOptions{ - 
LabelSelector: c.Options.CiliumOperatorLabelSelector, - }) - if err != nil { - return fmt.Errorf("failed to get cilium-operator pods: %w", err) - } - if err := c.SubmitGopsSubtasks(FilterPods(p, c.NodeList), ciliumOperatorContainerName); err != nil { - return fmt.Errorf("failed to collect Cilium gops: %w", err) + if err := c.SubmitGopsSubtasks(c.CiliumOperatorPods, ciliumOperatorContainerName); err != nil { + return fmt.Errorf("failed to collect cilium-operator gops stats: %w", err) } return nil }, @@ -1185,14 +1203,16 @@ func (c *Collector) Run() error { }, { CreatesSubtasks: true, - Description: "Collecting profiling data from Cilium pods", + Description: "Collecting profiling data from Cilium Operator pods", Quick: false, Task: func(_ context.Context) error { if !c.Options.Profiling { return nil } - if err := c.SubmitProfilingGopsSubtasks(c.CiliumPods, ciliumAgentContainerName); err != nil { - return fmt.Errorf("failed to collect profiling data from Cilium pods: %w", err) + + err := c.SubmitStreamProfilingGopsSubtasks(c.CiliumOperatorPods, ciliumOperatorContainerName, ciliumdef.GopsPortOperator) + if err != nil { + return fmt.Errorf("failed to collect cilium-operator profiles: %w", err) } return nil }, @@ -1247,13 +1267,7 @@ func (c *Collector) Run() error { Description: "Collecting logs from Cilium operator pods", Quick: false, Task: func(ctx context.Context) error { - p, err := c.Client.ListPods(ctx, c.Options.CiliumNamespace, metav1.ListOptions{ - LabelSelector: c.Options.CiliumOperatorLabelSelector, - }) - if err != nil { - return fmt.Errorf("failed to get logs from Cilium operator pods") - } - if err := c.SubmitLogsTasks(AllPods(p), c.Options.LogsSinceTime, c.Options.LogsLimitBytes); err != nil { + if err := c.SubmitLogsTasks(c.CiliumOperatorPods, c.Options.LogsSinceTime, c.Options.LogsLimitBytes); err != nil { return fmt.Errorf("failed to collect logs from Cilium operator pods") } return nil @@ -1404,6 +1418,19 @@ func (c *Collector) Run() error { tasks = append(tasks, ciliumTasks...) serialTasks = append(serialTasks, Task{ + CreatesSubtasks: true, + Description: "Collecting profiling data from Cilium pods", + Quick: false, + Task: func(_ context.Context) error { + if !c.Options.Profiling { + return nil + } + if err := c.SubmitProfilingGopsSubtasks(c.CiliumPods, ciliumAgentContainerName); err != nil { + return fmt.Errorf("failed to collect profiling data from Cilium pods: %w", err) + } + return nil + }, + }, Task{ CreatesSubtasks: true, Description: "Collecting tracing data from Cilium pods", Quick: false, @@ -2828,10 +2855,10 @@ func (c *Collector) submitKVStoreTasks(ctx context.Context, pod *corev1.Pod) err } // SubmitMetricsSubtask submits tasks to collect metrics from pods. 
-func (c *Collector) SubmitMetricsSubtask(pods *corev1.PodList, containerName, portName string) error { - for _, p := range pods.Items { +func (c *Collector) SubmitMetricsSubtask(pods []*corev1.Pod, containerName, portName string) error { + for _, p := range pods { p := p - if !podIsRunningAndHasContainer(&p, containerName) { + if !podIsRunningAndHasContainer(p, containerName) { continue } err := c.Pool.Submit(fmt.Sprintf("metrics-%s-%s-%s", p.Name, containerName, portName), func(ctx context.Context) error { @@ -2855,7 +2882,7 @@ func (c *Collector) SubmitMetricsSubtask(pods *corev1.PodList, containerName, po return nil } -func (c *Collector) submitClusterMeshAPIServerDbgTasks(pods *corev1.PodList) error { +func (c *Collector) submitClusterMeshAPIServerDbgTasks(pods []*corev1.Pod) error { tasks := []struct { name string ext string @@ -2899,9 +2926,9 @@ func (c *Collector) submitClusterMeshAPIServerDbgTasks(pods *corev1.PodList) err }, } - for _, pod := range pods.Items { + for _, pod := range pods { for _, task := range tasks { - if !podIsRunningAndHasContainer(&pod, task.container) { + if !podIsRunningAndHasContainer(pod, task.container) { continue } @@ -2936,7 +2963,7 @@ func (c *Collector) submitClusterMeshAPIServerDbgTasks(pods *corev1.PodList) err return nil } -func getPodMetricsPort(pod corev1.Pod, containerName, portName string) (int32, error) { +func getPodMetricsPort(pod *corev1.Pod, containerName, portName string) (int32, error) { for _, container := range pod.Spec.Containers { if container.Name != containerName { continue diff --git a/vendor/github.com/cilium/cilium/cilium-cli/utils/features/features.go b/vendor/github.com/cilium/cilium/cilium-cli/utils/features/features.go index 20c72569a6..cfc342fa7f 100644 --- a/vendor/github.com/cilium/cilium/cilium-cli/utils/features/features.go +++ b/vendor/github.com/cilium/cilium/cilium-cli/utils/features/features.go @@ -42,8 +42,9 @@ const ( HealthChecking Feature = "health-checking" - EncryptionPod Feature = "encryption-pod" - EncryptionNode Feature = "encryption-node" + EncryptionPod Feature = "encryption-pod" + EncryptionNode Feature = "encryption-node" + EncryptionStrictMode Feature = "enable-encryption-strict-mode" IPv4 Feature = "ipv4" IPv6 Feature = "ipv6" @@ -80,6 +81,8 @@ const ( BGPControlPlane Feature = "enable-bgp-control-plane" NodeLocalDNS Feature = "node-local-dns" + + Multicast Feature = "multicast-enabled" ) // Feature is the name of a Cilium Feature (e.g. 
l7-proxy, cni chaining mode etc) @@ -320,6 +323,14 @@ func (fs Set) ExtractFromConfigMap(cm *v1.ConfigMap) { fs[BGPControlPlane] = Status{ Enabled: cm.Data[string(BGPControlPlane)] == "true", } + + fs[Multicast] = Status{ + Enabled: cm.Data[string(Multicast)] == "true", + } + + fs[EncryptionStrictMode] = Status{ + Enabled: cm.Data[string(EncryptionStrictMode)] == "true", + } } func (fs Set) ExtractFromNodes(nodesWithoutCilium map[string]struct{}) { diff --git a/vendor/github.com/cilium/cilium/hubble/pkg/printer/printer.go b/vendor/github.com/cilium/cilium/hubble/pkg/printer/printer.go index d1aab44024..d08940b9e9 100644 --- a/vendor/github.com/cilium/cilium/hubble/pkg/printer/printer.go +++ b/vendor/github.com/cilium/cilium/hubble/pkg/printer/printer.go @@ -800,6 +800,8 @@ func (p *Printer) WriteGetFlowsResponse(res *observerpb.GetFlowsResponse) error return p.WriteProtoFlow(res) case *observerpb.GetFlowsResponse_NodeStatus: return p.WriteProtoNodeStatusEvent(res) + case *observerpb.GetFlowsResponse_LostEvents: + return p.WriteLostEvent(res) case nil: return nil default: @@ -914,3 +916,89 @@ func (p *Printer) WriteServerStatusResponse(res *observerpb.ServerStatusResponse } return nil } + +// WriteLostEvent writes v1.Flow into the output writer. +func (p *Printer) WriteLostEvent(res *observerpb.GetFlowsResponse) error { + f := res.GetLostEvents() + + switch p.opts.output { + case TabOutput: + ew := &errWriter{w: p.tw} + src := f.GetSource() + numEventsLost := f.GetNumEventsLost() + cpu := f.GetCpu() + + if p.line == 0 { + ew.write("TIMESTAMP", tab) + if p.opts.nodeName { + ew.write("NODE", tab) + } + ew.write( + "SOURCE", tab, + "DESTINATION", tab, + "TYPE", tab, + "VERDICT", tab, + "SUMMARY", newline, + ) + } + ew.write("", tab) + if p.opts.nodeName { + ew.write("", tab) + } + ew.write( + src, tab, + "", tab, + "EVENTS LOST", tab, + "", tab, + fmt.Sprintf("CPU(%d) - %d", cpu.GetValue(), numEventsLost), newline, + ) + if ew.err != nil { + return fmt.Errorf("failed to write out packet: %w", ew.err) + } + case DictOutput: + ew := &errWriter{w: p.opts.w} + src := f.GetSource() + numEventsLost := f.GetNumEventsLost() + cpu := f.GetCpu() + if p.line != 0 { + // TODO: line length? + ew.write(dictSeparator, newline) + } + + // this is a little crude, but will do for now. 
should probably find the + // longest header and auto-format the keys + ew.write(" TIMESTAMP: ", "", newline) + if p.opts.nodeName { + ew.write(" NODE: ", "", newline) + } + ew.write( + " SOURCE: ", src, newline, + " TYPE: ", "EVENTS LOST", newline, + " VERDICT: ", "", newline, + " SUMMARY: ", fmt.Sprintf("CPU(%d) - %d", cpu.GetValue(), numEventsLost), newline, + ) + if ew.err != nil { + return fmt.Errorf("failed to write out packet: %w", ew.err) + } + case CompactOutput: + src := f.GetSource() + numEventsLost := f.GetNumEventsLost() + cpu := f.GetCpu() + + _, err := fmt.Fprintf(p.opts.w, + "EVENTS LOST: %s CPU(%d) %d\n", + src, + cpu.GetValue(), + numEventsLost, + ) + if err != nil { + return fmt.Errorf("failed to write out packet: %w", err) + } + case JSONLegacyOutput: + return p.jsonEncoder.Encode(f) + case JSONPBOutput: + return p.jsonEncoder.Encode(res) + } + p.line++ + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/allocator/allocator.go b/vendor/github.com/cilium/cilium/pkg/allocator/allocator.go index 5e015b83a2..13015dfbfb 100644 --- a/vendor/github.com/cilium/cilium/pkg/allocator/allocator.go +++ b/vendor/github.com/cilium/cilium/pkg/allocator/allocator.go @@ -13,7 +13,6 @@ import ( "github.com/cilium/cilium/pkg/backoff" "github.com/cilium/cilium/pkg/idpool" - "github.com/cilium/cilium/pkg/inctimer" "github.com/cilium/cilium/pkg/kvstore" "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/logging" @@ -28,9 +27,9 @@ var ( ) const ( - // maxAllocAttempts is the number of attempted allocation requests - // performed before failing. - maxAllocAttempts = 16 + // defaultMaxAllocAttempts is the default number of attempted allocation + // requests performed before failing. + defaultMaxAllocAttempts = 16 ) // Allocator is a distributed ID allocator backed by a KVstore. It maps @@ -151,6 +150,13 @@ type Allocator struct { // disableAutostart prevents starting the allocator when it is initialized disableAutostart bool + // operatorIDManagement indicates if cilium-operator is managing Cilium Identities. + operatorIDManagement bool + + // maxAllocAttempts is the number of attempted allocation requests + // performed before failing. + maxAllocAttempts int + // cacheValidators implement extra validations of retrieved identities, e.g., // to ensure that they belong to the expected range. cacheValidators []CacheValidator @@ -315,6 +321,7 @@ func NewAllocator(typ AllocatorKey, backend Backend, opts ...AllocatorOption) (* Min: time.Duration(20) * time.Millisecond, Factor: 2.0, }, + maxAllocAttempts: defaultMaxAllocAttempts, } for _, fn := range opts { @@ -400,6 +407,18 @@ func WithMasterKeyProtection() AllocatorOption { return func(a *Allocator) { a.enableMasterKeyProtection = true } } +// WithOperatorIDManagement enables the mode with cilium-operator managing +// Cilium Identities. +func WithOperatorIDManagement() AllocatorOption { + return func(a *Allocator) { a.operatorIDManagement = true } +} + +// WithMaxAllocAttempts sets the maxAllocAttempts. If not set, new Allocator +// will use defaultMaxAllocAttempts. 
+func WithMaxAllocAttempts(maxAttempts int) AllocatorOption { + return func(a *Allocator) { a.maxAllocAttempts = maxAttempts } +} + // WithoutGC disables the use of the garbage collector func WithoutGC() AllocatorOption { return func(a *Allocator) { a.disableGC = true } @@ -665,13 +684,22 @@ func (a *Allocator) Allocate(ctx context.Context, key AllocatorKey) (idpool.ID, return 0, false, false, fmt.Errorf("allocation was cancelled while waiting for initial key list to be received: %w", ctx.Err()) } + if a.operatorIDManagement { + id, err := a.GetWithRetry(ctx, key) + // The second and third return values are always false when + // operatorIDManagement is enabled because cilium-operator manages security + // IDs, and they are never newly allocated or require holding a reference to + // a key. + return id, false, false, err + } + kvstore.Trace("Allocating from kvstore", nil, logrus.Fields{fieldKey: key}) // make a copy of the template and customize it boff := a.backoffTemplate boff.Name = key.String() - for attempt := 0; attempt < maxAllocAttempts; attempt++ { + for attempt := 0; attempt < a.maxAllocAttempts; attempt++ { // Check our list of local keys already in use and increment the // refcnt. The returned key must be released afterwards. No kvstore // operation was performed for this allocation. @@ -716,6 +744,55 @@ func (a *Allocator) Allocate(ctx context.Context, key AllocatorKey) (idpool.ID, return 0, false, false, err } +func (a *Allocator) GetWithRetry(ctx context.Context, key AllocatorKey) (idpool.ID, error) { + getID := func() (idpool.ID, error) { + id, err := a.Get(ctx, key) + if err != nil { + return idpool.NoID, err + } + + if id == idpool.NoID { + return idpool.NoID, fmt.Errorf("security identity not found for key %s", key.String()) + } + + return id, nil + } + + // Make a copy of the template and customize it. + boff := a.backoffTemplate + boff.Name = key.String() + + var id idpool.ID + var err error + + for attempt := 0; attempt < a.maxAllocAttempts; attempt++ { + id, err = getID() + if err == nil { + return id, nil + } + + scopedLog := log.WithFields(logrus.Fields{ + fieldKey: key, + logfields.Attempt: attempt, + }) + + select { + case <-ctx.Done(): + scopedLog.WithError(ctx.Err()).Warning("Ongoing key allocation has been cancelled") + return idpool.NoID, fmt.Errorf("key allocation cancelled: %w", ctx.Err()) + default: + scopedLog.WithError(err).Debug("CiliumIdentity not yet created by cilium-operator, retrying...") + } + + if waitErr := boff.Wait(ctx); waitErr != nil { + scopedLog.Warning("timed out waiting for cilium-operator to allocate CiliumIdentity") + return idpool.NoID, fmt.Errorf("timed out waiting for cilium-operator to allocate CiliumIdentity for key %v, error: %w", key.GetKey(), waitErr) + } + } + + return idpool.NoID, err +} + // GetIfLocked returns the ID which is allocated to a key. Returns an ID of NoID if no ID // has been allocated to this key yet if the client is still holding the given // lock. @@ -817,6 +894,11 @@ func (a *Allocator) GetByIDIncludeRemoteCaches(ctx context.Context, id idpool.ID // the last user has released the ID, the key is removed in the KVstore and // the returned lastUse value is true. 
func (a *Allocator) Release(ctx context.Context, key AllocatorKey) (lastUse bool, err error) { + if a.operatorIDManagement { + log.WithField(fieldKey, key).Debug("Skipping key release when cilium-operator ID management is enabled") + return false, nil + } + log.WithField(fieldKey, key).Info("Releasing key") select { @@ -888,8 +970,6 @@ func (a *Allocator) syncLocalKeys() error { func (a *Allocator) startLocalKeySync() { go func(a *Allocator) { - kvTimer, kvTimerDone := inctimer.New() - defer kvTimerDone() for { if err := a.syncLocalKeys(); err != nil { log.WithError(err).Warning("Unable to run local key sync routine") @@ -899,7 +979,7 @@ func (a *Allocator) startLocalKeySync() { case <-a.stopGC: log.Debug("Stopped master key sync routine") return - case <-kvTimer.After(option.Config.KVstorePeriodicSync): + case <-time.After(option.Config.KVstorePeriodicSync): } } }(a) diff --git a/vendor/github.com/cilium/cilium/pkg/annotation/k8s.go b/vendor/github.com/cilium/cilium/pkg/annotation/k8s.go index 6894f3c6cf..59c3ad238b 100644 --- a/vendor/github.com/cilium/cilium/pkg/annotation/k8s.go +++ b/vendor/github.com/cilium/cilium/pkg/annotation/k8s.go @@ -140,13 +140,6 @@ const ( // use SNAT so that reply traffic comes back ServiceForwardingMode = ServicePrefix + "/forwarding-mode" - // ProxyVisibility / ProxyVisibilityAlias is the annotation name used to - // indicate whether proxy visibility should be enabled for a given pod (i.e., - // all traffic for the pod is redirected to the proxy for the given port / - // protocol in the annotation - ProxyVisibility = PolicyPrefix + "/proxy-visibility" - ProxyVisibilityAlias = Prefix + ".proxy-visibility" - // NoTrack / NoTrackAlias is the annotation name used to store the port and // protocol that we should bypass kernel conntrack for a given pod. This // applies for both TCP and UDP connection. Current use case is NodeLocalDNS. 
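The allocator changes earlier in this patch add WithOperatorIDManagement and WithMaxAllocAttempts as functional options to NewAllocator, and make Allocate fall back to a retried lookup (GetWithRetry) instead of allocating when operator ID management is enabled. A minimal caller-side sketch, assuming an existing AllocatorKey implementation and kvstore Backend are available; the helper names below are illustrative only and not part of this patch:

	package example

	import (
		"context"

		"github.com/cilium/cilium/pkg/allocator"
		"github.com/cilium/cilium/pkg/idpool"
	)

	// newOperatorManagedAllocator builds an Allocator that defers identity creation to
	// cilium-operator and caps lookup retries at 8 attempts instead of the default 16.
	func newOperatorManagedAllocator(keyType allocator.AllocatorKey, backend allocator.Backend) (*allocator.Allocator, error) {
		return allocator.NewAllocator(keyType, backend,
			allocator.WithOperatorIDManagement(), // only look up identities, never allocate them
			allocator.WithMaxAllocAttempts(8),    // overrides defaultMaxAllocAttempts
		)
	}

	// lookupIdentity shows the Allocate contract in this mode: the two boolean return
	// values are always false, and a missing identity surfaces as an error once the
	// retry budget is exhausted.
	func lookupIdentity(ctx context.Context, a *allocator.Allocator, key allocator.AllocatorKey) (idpool.ID, error) {
		id, _, _, err := a.Allocate(ctx, key)
		return id, err
	}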
diff --git a/vendor/github.com/cilium/cilium/pkg/aws/eni/types/types.go b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/types.go index c2cba6c384..5fecd2467a 100644 --- a/vendor/github.com/cilium/cilium/pkg/aws/eni/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/types.go @@ -206,6 +206,11 @@ type ENI struct { // // +optional Tags map[string]string `json:"tags,omitempty"` + + // PublicIP is the public IP associated with the ENI + // + // +optional + PublicIP string `json:"public-ip,omitempty"` } func (e *ENI) DeepCopyInterface() types.Interface { diff --git a/vendor/github.com/cilium/cilium/pkg/aws/eni/types/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/zz_generated.deepequal.go index 4c1c93fc0e..00b50c30d5 100644 --- a/vendor/github.com/cilium/cilium/pkg/aws/eni/types/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/zz_generated.deepequal.go @@ -163,6 +163,10 @@ func (in *ENI) DeepEqual(other *ENI) bool { } } + if in.PublicIP != other.PublicIP { + return false + } + return true } diff --git a/vendor/github.com/cilium/cilium/pkg/bgpv1/types/fake_router.go b/vendor/github.com/cilium/cilium/pkg/bgpv1/types/fake_router.go index 97a86388c5..f1bf0dc4ef 100644 --- a/vendor/github.com/cilium/cilium/pkg/bgpv1/types/fake_router.go +++ b/vendor/github.com/cilium/cilium/pkg/bgpv1/types/fake_router.go @@ -5,10 +5,14 @@ package types import "context" -type FakeRouter struct{} +type FakeRouter struct { + paths map[string]*Path +} func NewFakeRouter() Router { - return &FakeRouter{} + return &FakeRouter{ + paths: make(map[string]*Path), + } } func (f *FakeRouter) Stop() {} @@ -31,10 +35,13 @@ func (f *FakeRouter) ResetNeighbor(ctx context.Context, r ResetNeighborRequest) func (f *FakeRouter) AdvertisePath(ctx context.Context, p PathRequest) (PathResponse, error) { path := p.Path + f.paths[path.NLRI.String()] = path return PathResponse{path}, nil } func (f *FakeRouter) WithdrawPath(ctx context.Context, p PathRequest) error { + path := p.Path + delete(f.paths, path.NLRI.String()) return nil } @@ -51,7 +58,14 @@ func (f *FakeRouter) GetPeerState(ctx context.Context) (GetPeerStateResponse, er } func (f *FakeRouter) GetRoutes(ctx context.Context, r *GetRoutesRequest) (*GetRoutesResponse, error) { - return nil, nil + var routes []*Route + for _, path := range f.paths { + routes = append(routes, &Route{ + Prefix: path.NLRI.String(), + Paths: []*Path{path}, + }) + } + return &GetRoutesResponse{Routes: routes}, nil } func (f *FakeRouter) GetRoutePolicies(ctx context.Context) (*GetRoutePoliciesResponse, error) { diff --git a/vendor/github.com/cilium/cilium/pkg/bgpv1/types/log.go b/vendor/github.com/cilium/cilium/pkg/bgpv1/types/log.go index cc5cf76dd0..9f7a9384e4 100644 --- a/vendor/github.com/cilium/cilium/pkg/bgpv1/types/log.go +++ b/vendor/github.com/cilium/cilium/pkg/bgpv1/types/log.go @@ -42,4 +42,7 @@ const ( // PolicyLogField is used as key for BGP policy in the log field. PolicyLogField = "policy" + + // ResourceLogField is used as key for k8s resource in the log field. 
+ ResourceLogField = "resource" ) diff --git a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go index 00b5673f6a..b00ee5c92e 100644 --- a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go +++ b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go @@ -4,13 +4,13 @@ package types import ( + "errors" "fmt" - "github.com/sirupsen/logrus" "github.com/spf13/pflag" "github.com/cilium/cilium/pkg/defaults" - "github.com/cilium/cilium/pkg/logging/logfields" + ipamOption "github.com/cilium/cilium/pkg/ipam/option" ) const ( @@ -47,29 +47,42 @@ func (def ClusterInfo) Flags(flags *pflag.FlagSet) { // Validate validates that the ClusterID is in the valid range (including ClusterID == 0), // and that the ClusterName is different from the default value if the ClusterID != 0. -func (c ClusterInfo) Validate(log logrus.FieldLogger) error { +func (c ClusterInfo) Validate() error { if c.ID < ClusterIDMin || c.ID > ClusterIDMax { return fmt.Errorf("invalid cluster id %d: must be in range %d..%d", c.ID, ClusterIDMin, ClusterIDMax) } - return c.validateName(log) + return c.validateName() } // ValidateStrict validates that the ClusterID is in the valid range, but not 0, // and that the ClusterName is different from the default value. -func (c ClusterInfo) ValidateStrict(log logrus.FieldLogger) error { +func (c ClusterInfo) ValidateStrict() error { if err := ValidateClusterID(c.ID); err != nil { return err } - return c.validateName(log) + return c.validateName() } -func (c ClusterInfo) validateName(log logrus.FieldLogger) error { +// ValidateBuggyClusterID returns an error if a buggy cluster ID (i.e., with the +// 7th bit set) is used in combination with ENI IPAM mode or AWS CNI chaining. +func (c ClusterInfo) ValidateBuggyClusterID(ipamMode, chainingMode string) error { + if (c.ID&0x80) != 0 && (ipamMode == ipamOption.IPAMENI || ipamMode == ipamOption.IPAMAlibabaCloud || chainingMode == "aws-cni") { + return errors.New("Cilium is currently affected by a bug that causes traffic matched " + + "by network policies to be incorrectly dropped when running in either ENI mode (both " + + "AWS and AlibabaCloud) or AWS VPC CNI chaining mode, if the cluster ID is 128-255 (and " + + "384-511 when max-connected-clusters=511). " + + "Please refer to https://github.com/cilium/cilium/issues/21330 for additional details.") + } + + return nil +} + +func (c ClusterInfo) validateName() error { if err := ValidateClusterName(c.Name); err != nil { - log.WithField(logfields.ClusterName, c.Name).WithError(err). - Error("Invalid cluster name. 
This may cause degraded functionality, and will be strictly forbidden starting from Cilium v1.17") + return fmt.Errorf("invalid cluster name: %w", err) } if c.ID != 0 && c.Name == defaults.ClusterName { diff --git a/vendor/github.com/cilium/cilium/pkg/container/bitlpm/cidr.go b/vendor/github.com/cilium/cilium/pkg/container/bitlpm/cidr.go index c80b199ac3..ee14422af0 100644 --- a/vendor/github.com/cilium/cilium/pkg/container/bitlpm/cidr.go +++ b/vendor/github.com/cilium/cilium/pkg/container/bitlpm/cidr.go @@ -54,6 +54,10 @@ func (c *CIDRTrie[T]) Ancestors(cidr netip.Prefix, fn func(k netip.Prefix, v T) }) } +func (c *CIDRTrie[T]) AncestorIterator(cidr netip.Prefix) Iterator[Key[netip.Prefix], T] { + return c.treeForFamily(cidr).AncestorIterator(uint(cidr.Bits()), cidrKey(cidr)) +} + // AncestorsLongestPrefixFirst iterates over every CIDR pair that contains the CIDR argument, // longest matching prefix first, then iterating towards the root of the trie. func (c *CIDRTrie[T]) AncestorsLongestPrefixFirst(cidr netip.Prefix, fn func(k netip.Prefix, v T) bool) { @@ -62,6 +66,10 @@ func (c *CIDRTrie[T]) AncestorsLongestPrefixFirst(cidr netip.Prefix, fn func(k n }) } +func (c *CIDRTrie[T]) AncestorLongestPrefixFirstIterator(cidr netip.Prefix) Iterator[Key[netip.Prefix], T] { + return c.treeForFamily(cidr).AncestorLongestPrefixFirstIterator(uint(cidr.Bits()), cidrKey(cidr)) +} + // Descendants iterates over every CIDR that is contained by the CIDR argument. func (c *CIDRTrie[T]) Descendants(cidr netip.Prefix, fn func(k netip.Prefix, v T) bool) { c.treeForFamily(cidr).Descendants(uint(cidr.Bits()), cidrKey(cidr), func(prefix uint, k Key[netip.Prefix], v T) bool { @@ -69,6 +77,10 @@ func (c *CIDRTrie[T]) Descendants(cidr netip.Prefix, fn func(k netip.Prefix, v T }) } +func (c *CIDRTrie[T]) DescendantIterator(cidr netip.Prefix) Iterator[Key[netip.Prefix], T] { + return c.treeForFamily(cidr).DescendantIterator(uint(cidr.Bits()), cidrKey(cidr)) +} + // DescendantsShortestPrefixFirst iterates over every CIDR that is contained by the CIDR argument. func (c *CIDRTrie[T]) DescendantsShortestPrefixFirst(cidr netip.Prefix, fn func(k netip.Prefix, v T) bool) { c.treeForFamily(cidr).DescendantsShortestPrefixFirst(uint(cidr.Bits()), cidrKey(cidr), func(prefix uint, k Key[netip.Prefix], v T) bool { @@ -76,6 +88,10 @@ func (c *CIDRTrie[T]) DescendantsShortestPrefixFirst(cidr netip.Prefix, fn func( }) } +func (c *CIDRTrie[T]) DescendantShortestPrefixFirstIterator(cidr netip.Prefix) Iterator[Key[netip.Prefix], T] { + return c.treeForFamily(cidr).DescendantShortestPrefixFirstIterator(uint(cidr.Bits()), cidrKey(cidr)) +} + // Upsert adds or updates the value for a given prefix. func (c *CIDRTrie[T]) Upsert(cidr netip.Prefix, v T) bool { return c.treeForFamily(cidr).Upsert(uint(cidr.Bits()), cidrKey(cidr), v) diff --git a/vendor/github.com/cilium/cilium/pkg/container/bitlpm/trie.go b/vendor/github.com/cilium/cilium/pkg/container/bitlpm/trie.go index 3becf75182..d64c7f0e3b 100644 --- a/vendor/github.com/cilium/cilium/pkg/container/bitlpm/trie.go +++ b/vendor/github.com/cilium/cilium/pkg/container/bitlpm/trie.go @@ -44,12 +44,19 @@ type Trie[K, T any] interface { // Note: If the prefix argument exceeds the Trie's maximum // prefix, it will be set to the Trie's maximum prefix. Ancestors(prefix uint, key K, fn func(uint, K, T) bool) + // AncestorIterator returns an iterator for ancestors that + // can be used to produce the 'Next' key/value pair in sequence. 
+ AncestorIterator(prefix uint, key K) Iterator[K, T] // AncestorsLongestPrefixFirst iterates over every prefix-key pair that // contains the prefix-key argument pair. If the function argument // returns false the iteration will stop. AncestorsLongestPrefixFirst // iterates keys from longest to shortest prefix match (that is, the // longest matching prefix will be returned first). AncestorsLongestPrefixFirst(prefix uint, key K, fn func(uint, K, T) bool) + // AncestorLongestPrefixFirstIterator returns an iterator for ancestors + // that can be used to produce the 'Next' key/value pair in sequence, + // starting from the key with the longest common prefix with 'key'. + AncestorLongestPrefixFirstIterator(prefix uint, key K) Iterator[K, T] // Descendants iterates over every prefix-key pair that is contained // by the prefix-key argument pair. If the function argument // returns false the iteration will stop. Descendants does **not** iterate @@ -58,12 +65,19 @@ type Trie[K, T any] interface { // Note: If the prefix argument exceeds the Trie's maximum // prefix, it will be set to the Trie's maximum prefix. Descendants(prefix uint, key K, fn func(uint, K, T) bool) + // DescendantIterator returns an iterator for descendants + // that can be used to produce the 'Next' key/value pair in sequence. + DescendantIterator(prefix uint, key K) Iterator[K, T] // DescendantsShortestPrefixFirst iterates over every prefix-key pair that is contained by // the prefix-key argument pair. If the function argument returns false the iteration will // stop. DescendantsShortestPrefixFirst iterates keys starting from shortest prefix, and // progressing towards keys with longer prefixes. Keys with equal prefix lengths are not // iterated in any particular order. DescendantsShortestPrefixFirst(prefix uint, key K, fn func(uint, K, T) bool) + // DescendantShortestPrefixFirstIterator returns an iterator for descendants + // that can be used to produce the 'Next' key/value pair in sequence, + // starting from the key with the shortest common prefix with 'key'. + DescendantShortestPrefixFirstIterator(prefix uint, key K) Iterator[K, T] // Upsert updates or inserts the trie with a prefix, key, // and value. The method returns true if the key is new, and // false if the key already existed. @@ -84,6 +98,16 @@ type Trie[K, T any] interface { ForEach(fn func(uint, K, T) bool) } +// Iterator is an interface that can be used to produce the next key/value pair in iteration +// sequence. 'ok' is 'false' when the sequence ends; 'key' and 'value' are returned with empty +// values in that case. +// Iteration state is held in the implementation explicitly, rather than in Go stack/closures. +// Policy mapstate generation benchmark BenchmarkRegenerateCIDRDenyPolicyRules reports 25% less allocations +// with Iterator, even in combination with Go 1.23 Iterators on the caller side. +type Iterator[K, T any] interface { + Next() (ok bool, key K, value T) +} + // Key is an interface that implements all the necessary // methods to index and retrieve keys. type Key[K any] interface { @@ -171,6 +195,204 @@ func (t *trie[K, T]) Ancestors(prefixLen uint, k Key[K], fn func(prefix uint, ke }) } +// ancestorIterator implements Iterator for ancestor iteration +type ancestorIterator[K, T any] struct { + key Key[K] + prefixLen uint + maxPrefix uint + currentNode *node[K, T] +} + +// AncestorIterator returns an iterator for ancestors.
+func (t *trie[K, T]) AncestorIterator(prefixLen uint, k Key[K]) Iterator[Key[K], T] { + if k != nil { + return &ancestorIterator[K, T]{ + prefixLen: min(prefixLen, t.maxPrefix), + key: k, + maxPrefix: t.maxPrefix, + currentNode: t.root, + } + } + return &ancestorIterator[K, T]{} +} + +// Next produces the 'Next' key/value pair in the iteration sequence maintained by 'iter'. 'ok' is +// 'false' when the sequence ends; 'key' and 'value' are returned with empty values in that case. +func (i *ancestorIterator[K, T]) Next() (ok bool, key Key[K], value T) { + for i.currentNode != nil { + k := i.key + prefixLen := i.prefixLen + currentNode := i.currentNode + + matchLen := currentNode.prefixMatch(prefixLen, k) + // The current-node does not match. + if matchLen < currentNode.prefixLen { + break + } + // Skip over intermediate nodes + if currentNode.intermediate { + i.currentNode = currentNode.children[k.BitValueAt(currentNode.prefixLen)] + continue + } + if matchLen == i.maxPrefix { + i.currentNode = nil + } else { + i.currentNode = currentNode.children[k.BitValueAt(currentNode.prefixLen)] + } + return true, currentNode.key, currentNode.value + } + return false, key, value +} + +// ancestorLPFIterator implements Iterator for ancestor iteration for longest-prefix-first iteration +// order. +type ancestorLPFIterator[K, T any] struct { + stack nodes[K, T] +} + +// AncestorLongestPrefixFirstIterator returns an iterator for ancestors +// that can be used to produce the 'Next' key/value pair in sequence, +// starting from the key with the longest common prefix with 'key'. +func (t *trie[K, T]) AncestorLongestPrefixFirstIterator(prefixLen uint, k Key[K]) Iterator[Key[K], T] { + iter := &ancestorLPFIterator[K, T]{} + if k != nil { + for currentNode := t.root; currentNode != nil; currentNode = currentNode.children[k.BitValueAt(currentNode.prefixLen)] { + matchLen := currentNode.prefixMatch(prefixLen, k) + // The current-node does not match. + if matchLen < currentNode.prefixLen { + break + } + // Skip over intermediate nodes + if currentNode.intermediate { + continue + } + iter.stack.push(currentNode) + if matchLen == t.maxPrefix { + break + } + } + } + return iter +} + +// Next produces the 'Next' key/value pair in the iteration sequence maintained by 'iter'. 'ok' is +// 'false' when the sequence ends; 'key' and 'value' are returned with empty values in that case. +func (i *ancestorLPFIterator[K, T]) Next() (ok bool, key Key[K], value T) { + if len(i.stack) > 0 { + n := i.stack.pop() + return true, n.key, n.value + } + return false, key, value +} + +// descendantIterator implements Iterator for descendants iteration +type descendantIterator[K, T any] struct { + nodes nodes[K, T] +} + +// DescendantIterator returns an iterator for descendants +// that can be used to produce the 'Next' key/value pair in sequence. +func (t *trie[K, T]) DescendantIterator(prefixLen uint, k Key[K]) Iterator[Key[K], T] { + iter := &descendantIterator[K, T]{} + if k != nil { + prefixLen = min(prefixLen, t.maxPrefix) + currentNode := t.root + for currentNode != nil { + matchLen := currentNode.prefixMatch(prefixLen, k) + // CurrentNode matches the prefix-key argument + if matchLen >= prefixLen { + iter.nodes.push(currentNode) + break + } + // currentNode is a leaf and has no children. Calling k.BitValueAt may + // overrun the key storage.
+ if currentNode.prefixLen >= t.maxPrefix { + break + } + currentNode = currentNode.children[k.BitValueAt(currentNode.prefixLen)] + } + } + return iter +} + +// Next produces the 'Next' key/value pair in the iteration sequence maintained by 'iter'. 'ok' is +// 'false' when the sequence ends; 'key' and 'value' are returned with empty values in that case. +func (i *descendantIterator[K, T]) Next() (ok bool, key Key[K], value T) { + for len(i.nodes) > 0 { + // pop the latest node + n := i.nodes.pop() + // push the children, if any + if n.children[0] != nil { + i.nodes.push(n.children[0]) + } + if n.children[1] != nil { + i.nodes.push(n.children[1]) + } + // Skip over intermediate nodes + if n.intermediate { + continue + } + return true, n.key, n.value + } + return false, key, value +} + +// descendantSPFIterator implements Iterator for descendants iteration for shortest-prefix-first +// iteration order. +type descendantSPFIterator[K, T any] struct { + heap nodes[K, T] +} + +// DescendantShortestPrefixFirstIterator returns an iterator for descendants +// that can be used to produce the 'Next' key/value pair in sequence, +// starting from the key with the shortest common prefix with 'key'. +func (t *trie[K, T]) DescendantShortestPrefixFirstIterator(prefixLen uint, k Key[K]) Iterator[Key[K], T] { + iter := &descendantSPFIterator[K, T]{} + if k != nil { + prefixLen = min(prefixLen, t.maxPrefix) + currentNode := t.root + for currentNode != nil { + matchLen := currentNode.prefixMatch(prefixLen, k) + // CurrentNode matches the prefix-key argument + if matchLen >= prefixLen { + iter.heap.push(currentNode) + break + } + // currentNode is a leaf and has no children. Calling k.BitValueAt may + // overrun the key storage. + if currentNode.prefixLen >= t.maxPrefix { + break + } + currentNode = currentNode.children[k.BitValueAt(currentNode.prefixLen)] + } + } + return iter +} + +// Next produces the 'Next' key/value pair in the iteration sequence maintained by 'iter'. 'ok' is +// 'false' when the sequence ends; 'key' and 'value' are returned with empty values in that case.
+func (i *descendantSPFIterator[K, T]) Next() (ok bool, key Key[K], value T) { + for i.heap.Len() > 0 { + // pop the node with the lowest prefix length from the heap + n := i.heap.popHeap() + // push the children, if any, into the heap + if n.children[0] != nil { + i.heap.pushHeap(n.children[0]) + } + if n.children[1] != nil { + i.heap.pushHeap(n.children[1]) + } + // Skip over intermediate nodes + if n.intermediate { + continue + } + return true, n.key, n.value + } + return false, key, value +} + func (t *trie[K, T]) AncestorsLongestPrefixFirst(prefixLen uint, k Key[K], fn func(prefix uint, key Key[K], value T) bool) { prefixLen = min(prefixLen, t.maxPrefix) t.treverse(prefixLen, k, func(currentNode *node[K, T]) bool { @@ -676,12 +898,23 @@ func (nodes *nodes[K, T]) Pop() any { return node } -// convenience wrappers +func (nodes *nodes[K, T]) pop() *node[K, T] { + n := len(*nodes) + node := (*nodes)[n-1] + *nodes = (*nodes)[:n-1] + return node +} + func (nodes *nodes[K, T]) push(n *node[K, T]) { + *nodes = append(*nodes, n) +} + +// convenience wrappers +func (nodes *nodes[K, T]) pushHeap(n *node[K, T]) { heap.Push(nodes, n) } -func (nodes *nodes[K, T]) pop() *node[K, T] { +func (nodes *nodes[K, T]) popHeap() *node[K, T] { return heap.Pop(nodes).(*node[K, T]) } @@ -696,11 +929,11 @@ func (n *node[K, T]) forEachShortestPrefixFirst(fn func(prefix uint, key Key[K], // has the shortest prefix length of any node in the subtree it represents. // Preallocate space for some pointers to reduce allocations and copies. nodes := make(nodes[K, T], 0, nPointersOnCacheline) - nodes.push(n) + nodes.pushHeap(n) for nodes.Len() > 0 { // pop the node with the lowest prefix length from the heap - n := nodes.pop() + n := nodes.popHeap() if !n.intermediate { if !fn(n.prefixLen, n.key, n.value) { return @@ -708,10 +941,10 @@ func (n *node[K, T]) forEachShortestPrefixFirst(fn func(prefix uint, key Key[K], } // push the children, if any, into the heap if n.children[0] != nil { - nodes.push(n.children[0]) + nodes.pushHeap(n.children[0]) } if n.children[1] != nil { - nodes.push(n.children[1]) + nodes.pushHeap(n.children[1]) } } } diff --git a/vendor/github.com/cilium/cilium/pkg/container/set/set.go b/vendor/github.com/cilium/cilium/pkg/container/set/set.go new file mode 100644 index 0000000000..5c7dab269d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/container/set/set.go @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package set + +import ( + "fmt" + "iter" + "maps" +) + +type empty struct{} + +// Set contains zero, one, or more members. Zero or one members do not consume any additional +// storage; more than one member is held in a non-exported members map. +type Set[T comparable] struct { + single *T + members map[T]empty +} + +// Empty returns 'true' if the set is empty. +func (s Set[T]) Empty() bool { + return s.single == nil && s.members == nil +} + +// Len returns the number of members in the set. +func (s Set[T]) Len() int { + if s.single != nil { + return 1 + } + return len(s.members) +} + +func (s Set[T]) String() string { + if s.single != nil { + return fmt.Sprintf("%v", s.single) + } + res := "" + for m := range s.members { + if res != "" { + res += "," + } + res += fmt.Sprintf("%v", m) + } + return res +} + +// NewSet returns a Set initialized to contain the members in 'members'.
+func NewSet[T comparable](members ...T) Set[T] { + s := Set[T]{} + for _, member := range members { + s.Insert(member) + } + return s +} + +// Has returns 'true' if 'member' is in the set. +func (s Set[T]) Has(member T) bool { + if s.single != nil { + return *s.single == member + } + _, ok := s.members[member] + return ok +} + +// Insert inserts a member into the set. +// Returns 'true' when '*s' value has changed, +// so that if it is stored by value the caller knows to update the stored value. +func (s *Set[T]) Insert(member T) (changed bool) { + switch s.Len() { + case 0: + s.single = &member + return true + case 1: + if member == *s.single { + return false + } + s.members = make(map[T]empty, 2) + s.members[*s.single] = empty{} + s.single = nil + s.members[member] = empty{} + return true + default: + s.members[member] = empty{} + return false + } +} + +// Merge inserts the members of the given sets into the set 's'. +// Returns 'true' when '*s' value has changed, +// so that if it is stored by value the caller knows to update the stored value. +func (s *Set[T]) Merge(sets ...Set[T]) (changed bool) { + for _, other := range sets { + for m := range other.Members() { + changed = s.Insert(m) || changed + } + } + return changed +} + +// Remove removes a member from the set. +// Returns 'true' when '*s' value was changed, so that if it is stored by value the caller knows to +// update the stored value. +func (s *Set[T]) Remove(member T) (changed bool) { + length := s.Len() + switch length { + case 0: + case 1: + if *s.single == member { + s.single = nil + return true + } + case 2: + delete(s.members, member) + if len(s.members) == 1 { + for m := range s.members { + s.single = &m + } + s.members = nil + return true + } + default: + delete(s.members, member) + } + return false +} + +// RemoveSets removes one or more Sets from the receiver set. +// Returns 'true' when '*s' value was changed, so that if it is stored by value the caller knows to +// update the stored value. +func (s *Set[T]) RemoveSets(sets ...Set[T]) (changed bool) { + for _, other := range sets { + for m := range other.Members() { + changed = s.Remove(m) || changed + } + } + return changed +} + +// Clear makes the set '*s' empty. +func (s *Set[T]) Clear() { + s.single = nil + s.members = nil +} + +// Equal returns 'true' if the receiver and argument sets are the same. +func (s Set[T]) Equal(o Set[T]) bool { + sLen := s.Len() + oLen := o.Len() + + if sLen != oLen { + return false + } + + switch sLen { + case 0: + return true + case 1: + return *s.single == *o.single + } + // compare the elements of the maps + for member := range s.members { + if _, ok := o.members[member]; !ok { + return false + } + } + return true +} + +// Members returns an iterator for the members in the set. +func (s Set[T]) Members() iter.Seq[T] { + return func(yield func(m T) bool) { + if s.single != nil { + yield(*s.single) + } else { + for member := range s.members { + if !yield(member) { + return + } + } + } + } +} + +// MembersOfType returns an iterator for each member of type M in the set. +func MembersOfType[M any, T comparable](s Set[T]) iter.Seq[M] { + return func(yield func(m M) bool) { + if s.single != nil { + if v, ok := any(*s.single).(M); ok { + yield(v) + } + } else { + for m := range s.members { + if v, ok := any(m).(M); ok { + if !yield(v) { + return + } + } + } + } + } +} + +// Get returns any one member from the set. +// Useful when it is known that the set has only one element.
+func (s Set[T]) Get() (m T, found bool) { + length := s.Len() + + switch length { + case 0: + case 1: + m = *s.single + default: + for m = range s.members { + break + } + } + return m, length > 0 +} + +// Clone returns a copy of the set. +func (s Set[T]) Clone() Set[T] { + if s.members != nil { + return Set[T]{members: maps.Clone(s.members)} + } + return s // singular value or empty Set +} diff --git a/vendor/github.com/cilium/cilium/pkg/container/versioned/value.go b/vendor/github.com/cilium/cilium/pkg/container/versioned/value.go index f446845349..29c4bfe9e9 100644 --- a/vendor/github.com/cilium/cilium/pkg/container/versioned/value.go +++ b/vendor/github.com/cilium/cilium/pkg/container/versioned/value.go @@ -280,7 +280,10 @@ func (v *Coordinator) clean() { // 'keepVersion' is the current version if there are no outstanding VersionHandles keepVersion := v.version if len(v.versions) > 0 { - keepVersion = v.versions[0].version + // otherwise it is the oldest version for which there is an outstanding handle, if + // older than the current version, as if there was an implicit outstanding handle + // for the current version. + keepVersion = min(v.version, v.versions[0].version) } // Call the cleaner for 'keepVersion' only if not already called for this 'keepVersion'. diff --git a/vendor/github.com/cilium/cilium/pkg/controller/controller.go b/vendor/github.com/cilium/cilium/pkg/controller/controller.go index 6ee59316bf..0283b3247b 100644 --- a/vendor/github.com/cilium/cilium/pkg/controller/controller.go +++ b/vendor/github.com/cilium/cilium/pkg/controller/controller.go @@ -8,11 +8,11 @@ import ( "errors" "fmt" "math" + stdtime "time" "github.com/cilium/hive/cell" "github.com/sirupsen/logrus" - "github.com/cilium/cilium/pkg/inctimer" "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/metrics" "github.com/cilium/cilium/pkg/time" @@ -242,9 +242,6 @@ func (c *controller) GetLastErrorTimestamp() time.Time { func (c *controller) runController(params ControllerParams) { errorRetries := 1 - runTimer, timerDone := inctimer.New() - defer timerDone() - for { var err error @@ -325,7 +322,7 @@ func (c *controller) runController(params ControllerParams) { case params = <-c.update: // update channel is never closed - case <-runTimer.After(interval): + case <-stdtime.After(interval): // timer channel is not yet closed case <-c.trigger: // trigger channel is never closed diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go index 596e30ab19..d80f27b97b 100644 --- a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go @@ -11,6 +11,7 @@ import ( "github.com/vishvananda/netlink" + "github.com/cilium/cilium/pkg/datapath/linux/safenetlink" "github.com/cilium/cilium/pkg/netns" ) @@ -70,7 +71,7 @@ func haveManagedNeighbors() (outer error) { return fmt.Errorf("failed to add neighbor: %w", err) } - nl, err := netlink.NeighList(veth.Index, 0) + nl, err := safenetlink.NeighList(veth.Index, 0) if err != nil { return fmt.Errorf("failed to list neighbors: %w", err) } diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go index ec10584208..dcf0950dcd 100644 --- a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go +++ 
b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "math" "net" "os" "path/filepath" @@ -392,6 +393,13 @@ func HaveFibIfindex() error { return features.HaveProgramHelper(ebpf.SchedCLS, asm.FnRedirectPeer) } +// HaveWriteableQueueMapping checks if kernel has 74e31ca850c1 ("bpf: add +// skb->queue_mapping write access from tc clsact") which is 5.1+. This got merged +// in the same kernel as the bpf_skb_ecn_set_ce() helper. +func HaveWriteableQueueMapping() error { + return features.HaveProgramHelper(ebpf.SchedCLS, asm.FnSkbEcnSetCe) +} + // HaveV2ISA is a wrapper around features.HaveV2ISA() to check if the kernel // supports the V2 ISA. // On unexpected probe results this function will terminate with log.Fatal(). @@ -462,6 +470,51 @@ var HaveTCX = sync.OnceValue(func() error { }) }) +// HaveNetkit returns nil if the running kernel supports attaching bpf programs +// to netkit devices. +var HaveNetkit = sync.OnceValue(func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.SchedCLS, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + License: "Apache-2.0", + }) + if err != nil { + return err + } + defer prog.Close() + + ns, err := netns.New() + if err != nil { + return fmt.Errorf("create netns: %w", err) + } + defer ns.Close() + + return ns.Do(func() error { + l, err := link.AttachNetkit(link.NetkitOptions{ + Program: prog, + Attach: ebpf.AttachNetkitPrimary, + Interface: math.MaxInt, + }) + // We rely on this being checked during the syscall. With + // an otherwise correct payload we expect ENODEV here as + // an indication that the feature is present. + if errors.Is(err, unix.ENODEV) { + return nil + } + if err != nil { + return fmt.Errorf("creating link: %w", err) + } + if err := l.Close(); err != nil { + return fmt.Errorf("closing link: %w", err) + } + + return fmt.Errorf("unexpected success: %w", err) + }) +}) + // HaveOuterSourceIPSupport tests whether the kernel support setting the outer // source IP address via the bpf_skb_set_tunnel_key BPF helper. We can't rely // on the verifier to reject a program using the new support because the diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/safenetlink/netlink_linux.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/safenetlink/netlink_linux.go new file mode 100644 index 0000000000..8b1216dd90 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/safenetlink/netlink_linux.go @@ -0,0 +1,395 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package safenetlink + +import ( + "context" + "errors" + "net" + + "github.com/vishvananda/netlink" + "github.com/vishvananda/netlink/nl" + + "github.com/cilium/cilium/pkg/resiliency" + "github.com/cilium/cilium/pkg/time" +) + +const ( + netlinkRetryInterval = 1 * time.Millisecond + netlinkRetryMax = 30 +) + +// WithRetry runs the netlinkFunc. If netlinkFunc returns netlink.ErrDumpInterrupted, the function is retried. +// If success or any other error is returned, WithRetry returns immediately, propagating the error. 
+func WithRetry(netlinkFunc func() error) error { + return resiliency.Retry(context.Background(), netlinkRetryInterval, netlinkRetryMax, func(ctx context.Context, retries int) (bool, error) { + err := netlinkFunc() + if errors.Is(err, netlink.ErrDumpInterrupted) { + return false, nil // retry + } + + return true, err + }) +} + +// WithRetryResult works like WithRetry, but allows netlinkFunc to have a return value besides the error +func WithRetryResult[T any](netlinkFunc func() (T, error)) (out T, err error) { + err = WithRetry(func() error { + out, err = netlinkFunc() + return err + }) + return out, err +} + +// AddrList wraps netlink.AddrList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func AddrList(link netlink.Link, family int) ([]netlink.Addr, error) { + return WithRetryResult(func() ([]netlink.Addr, error) { + return netlink.AddrList(link, family) + }) +} + +// BridgeVlanList wraps netlink.BridgeVlanList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { + return WithRetryResult(func() (map[int32][]*nl.BridgeVlanInfo, error) { + return netlink.BridgeVlanList() + }) +} + +// ChainList wraps netlink.ChainList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func ChainList(link netlink.Link, parent uint32) ([]netlink.Chain, error) { + return WithRetryResult(func() ([]netlink.Chain, error) { + return netlink.ChainList(link, parent) + }) +} + +// ClassList wraps netlink.ClassList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func ClassList(link netlink.Link, parent uint32) ([]netlink.Class, error) { + return WithRetryResult(func() ([]netlink.Class, error) { + return netlink.ClassList(link, parent) + }) +} + +// ConntrackTableList wraps netlink.ConntrackTableList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func ConntrackTableList(table netlink.ConntrackTableType, family netlink.InetFamily) ([]*netlink.ConntrackFlow, error) { + return WithRetryResult(func() ([]*netlink.ConntrackFlow, error) { + return netlink.ConntrackTableList(table, family) + }) +} + +// DevLinkGetDeviceList wraps netlink.DevLinkGetDeviceList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func DevLinkGetDeviceList() ([]*netlink.DevlinkDevice, error) { + return WithRetryResult(func() ([]*netlink.DevlinkDevice, error) { + return netlink.DevLinkGetDeviceList() + }) +} + +// DevLinkGetAllPortList wraps netlink.DevLinkGetAllPortList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func DevLinkGetAllPortList() ([]*netlink.DevlinkPort, error) { + return WithRetryResult(func() ([]*netlink.DevlinkPort, error) { + return netlink.DevLinkGetAllPortList() + }) +} + +// DevlinkGetDeviceParams wraps netlink.DevlinkGetDeviceParams, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func DevlinkGetDeviceParams(bus string, device string) ([]*netlink.DevlinkParam, error) { + return WithRetryResult(func() ([]*netlink.DevlinkParam, error) { + return netlink.DevlinkGetDeviceParams(bus, device) + }) +} + +// FilterList wraps netlink.FilterList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func FilterList(link netlink.Link, parent uint32) ([]netlink.Filter, error) { + return WithRetryResult(func() ([]netlink.Filter, error) { + return netlink.FilterList(link, parent) + 
}) +} + +// FouList wraps netlink.FouList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func FouList(fam int) ([]netlink.Fou, error) { + return WithRetryResult(func() ([]netlink.Fou, error) { + return netlink.FouList(fam) + }) +} + +// GenlFamilyList wraps netlink.GenlFamilyList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func GenlFamilyList() ([]*netlink.GenlFamily, error) { + return WithRetryResult(func() ([]*netlink.GenlFamily, error) { + return netlink.GenlFamilyList() + }) +} + +// GTPPDPList wraps netlink.GTPPDPList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func GTPPDPList() ([]*netlink.PDP, error) { + return WithRetryResult(func() ([]*netlink.PDP, error) { + return netlink.GTPPDPList() + }) +} + +// LinkByName wraps netlink.LinkByName, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func LinkByName(name string) (netlink.Link, error) { + return WithRetryResult(func() (netlink.Link, error) { + return netlink.LinkByName(name) + }) +} + +// LinkByAlias wraps netlink.LinkByAlias, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func LinkByAlias(alias string) (netlink.Link, error) { + return WithRetryResult(func() (netlink.Link, error) { + return netlink.LinkByAlias(alias) + }) +} + +// LinkList wraps netlink.LinkList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func LinkList() ([]netlink.Link, error) { + return WithRetryResult(func() ([]netlink.Link, error) { + return netlink.LinkList() + }) +} + +// LinkSubscribeWithOptions wraps netlink.LinkSubscribeWithOptions, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func LinkSubscribeWithOptions(ch chan<- netlink.LinkUpdate, done <-chan struct{}, options netlink.LinkSubscribeOptions) error { + return WithRetry(func() error { + return netlink.LinkSubscribeWithOptions(ch, done, options) + }) +} + +// NeighList wraps netlink.NeighList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func NeighList(linkIndex, family int) ([]netlink.Neigh, error) { + return WithRetryResult(func() ([]netlink.Neigh, error) { + return netlink.NeighList(linkIndex, family) + }) +} + +// NeighProxyList wraps netlink.NeighProxyList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func NeighProxyList(linkIndex, family int) ([]netlink.Neigh, error) { + return WithRetryResult(func() ([]netlink.Neigh, error) { + return netlink.NeighProxyList(linkIndex, family) + }) +} + +// NeighListExecute wraps netlink.NeighListExecute, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func NeighListExecute(msg netlink.Ndmsg) ([]netlink.Neigh, error) { + return WithRetryResult(func() ([]netlink.Neigh, error) { + return netlink.NeighListExecute(msg) + }) +} + +// LinkGetProtinfo wraps netlink.LinkGetProtinfo, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func LinkGetProtinfo(link netlink.Link) (netlink.Protinfo, error) { + return WithRetryResult(func() (netlink.Protinfo, error) { + return netlink.LinkGetProtinfo(link) + }) +} + +// QdiscList wraps netlink.QdiscList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func QdiscList(link netlink.Link) ([]netlink.Qdisc, error) { + return WithRetryResult(func() ([]netlink.Qdisc, error) { + return netlink.QdiscList(link) + }) +} + 
+// RdmaLinkList wraps netlink.RdmaLinkList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RdmaLinkList() ([]*netlink.RdmaLink, error) { + return WithRetryResult(func() ([]*netlink.RdmaLink, error) { + return netlink.RdmaLinkList() + }) +} + +// RdmaLinkByName wraps netlink.RdmaLinkByName, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RdmaLinkByName(name string) (*netlink.RdmaLink, error) { + return WithRetryResult(func() (*netlink.RdmaLink, error) { + return netlink.RdmaLinkByName(name) + }) +} + +// RdmaLinkDel wraps netlink.RdmaLinkDel, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RdmaLinkDel(name string) error { + return WithRetry(func() error { + return netlink.RdmaLinkDel(name) + }) +} + +// RouteList wraps netlink.RouteList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RouteList(link netlink.Link, family int) ([]netlink.Route, error) { + return WithRetryResult(func() ([]netlink.Route, error) { + return netlink.RouteList(link, family) + }) +} + +// RouteListFiltered wraps netlink.RouteListFiltered, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RouteListFiltered(family int, filter *netlink.Route, filterMask uint64) ([]netlink.Route, error) { + return WithRetryResult(func() ([]netlink.Route, error) { + return netlink.RouteListFiltered(family, filter, filterMask) + }) +} + +// RouteListFilteredIter wraps netlink.RouteListFilteredIter, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RouteListFilteredIter(family int, filter *netlink.Route, filterMask uint64, f func(netlink.Route) (cont bool)) error { + return WithRetry(func() error { + return netlink.RouteListFilteredIter(family, filter, filterMask, f) + }) +} + +// RouteSubscribeWithOptions wraps netlink.RouteSubscribeWithOptions, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RouteSubscribeWithOptions(ch chan<- netlink.RouteUpdate, done <-chan struct{}, options netlink.RouteSubscribeOptions) error { + return WithRetry(func() error { + return netlink.RouteSubscribeWithOptions(ch, done, options) + }) +} + +// RuleList wraps netlink.RuleList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RuleList(family int) ([]netlink.Rule, error) { + return WithRetryResult(func() ([]netlink.Rule, error) { + return netlink.RuleList(family) + }) +} + +// RuleListFiltered wraps netlink.RuleListFiltered, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RuleListFiltered(family int, filter *netlink.Rule, filterMask uint64) ([]netlink.Rule, error) { + return WithRetryResult(func() ([]netlink.Rule, error) { + return netlink.RuleListFiltered(family, filter, filterMask) + }) +} + +// SocketGet wraps netlink.SocketGet, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func SocketGet(local, remote net.Addr) (*netlink.Socket, error) { + return WithRetryResult(func() (*netlink.Socket, error) { + return netlink.SocketGet(local, remote) + }) +} + +// SocketDiagTCPInfo wraps netlink.SocketDiagTCPInfo, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func SocketDiagTCPInfo(family uint8) ([]*netlink.InetDiagTCPInfoResp, error) { + return WithRetryResult(func() ([]*netlink.InetDiagTCPInfoResp, error) { + return netlink.SocketDiagTCPInfo(family) + }) +} 
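The safenetlink package introduced above wraps netlink list/dump calls so that transient netlink.ErrDumpInterrupted results are retried transparently; the managed-neighbors probe earlier in this patch already switches to safenetlink.NeighList, and WithRetryResult covers dump calls that have no dedicated wrapper. A short caller-side sketch, not part of this patch, with "eth0" and the IPv4 family chosen only for illustration:

	package example

	import (
		"fmt"

		"github.com/vishvananda/netlink"

		"github.com/cilium/cilium/pkg/datapath/linux/safenetlink"
	)

	// printIPv4Routes lists IPv4 routes for the named device; both lookups retry
	// automatically if the kernel interrupts the netlink dump.
	func printIPv4Routes(ifName string) error {
		link, err := safenetlink.LinkByName(ifName)
		if err != nil {
			return err
		}
		routes, err := safenetlink.RouteList(link, netlink.FAMILY_V4)
		if err != nil {
			return err
		}
		for _, r := range routes {
			fmt.Println(r.Dst)
		}
		return nil
	}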
+ +// SocketDiagTCP wraps netlink.SocketDiagTCP, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func SocketDiagTCP(family uint8) ([]*netlink.Socket, error) { + return WithRetryResult(func() ([]*netlink.Socket, error) { + return netlink.SocketDiagTCP(family) + }) +} + +// SocketDiagUDPInfo wraps netlink.SocketDiagUDPInfo, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func SocketDiagUDPInfo(family uint8) ([]*netlink.InetDiagUDPInfoResp, error) { + return WithRetryResult(func() ([]*netlink.InetDiagUDPInfoResp, error) { + return netlink.SocketDiagUDPInfo(family) + }) +} + +// SocketDiagUDP wraps netlink.SocketDiagUDP, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func SocketDiagUDP(family uint8) ([]*netlink.Socket, error) { + return WithRetryResult(func() ([]*netlink.Socket, error) { + return netlink.SocketDiagUDP(family) + }) +} + +// UnixSocketDiagInfo wraps netlink.UnixSocketDiagInfo, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func UnixSocketDiagInfo() ([]*netlink.UnixDiagInfoResp, error) { + return WithRetryResult(func() ([]*netlink.UnixDiagInfoResp, error) { + return netlink.UnixSocketDiagInfo() + }) +} + +// UnixSocketDiag wraps netlink.UnixSocketDiag, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func UnixSocketDiag() ([]*netlink.UnixSocket, error) { + return WithRetryResult(func() ([]*netlink.UnixSocket, error) { + return netlink.UnixSocketDiag() + }) +} + +// SocketXDPGetInfo wraps netlink.SocketXDPGetInfo, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func SocketXDPGetInfo(ino uint32, cookie uint64) (*netlink.XDPDiagInfoResp, error) { + return WithRetryResult(func() (*netlink.XDPDiagInfoResp, error) { + return netlink.SocketXDPGetInfo(ino, cookie) + }) +} + +// SocketDiagXDP wraps netlink.SocketDiagXDP, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func SocketDiagXDP() ([]*netlink.XDPDiagInfoResp, error) { + return WithRetryResult(func() ([]*netlink.XDPDiagInfoResp, error) { + return netlink.SocketDiagXDP() + }) +} + +// VDPAGetDevList wraps netlink.VDPAGetDevList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func VDPAGetDevList() ([]*netlink.VDPADev, error) { + return WithRetryResult(func() ([]*netlink.VDPADev, error) { + return netlink.VDPAGetDevList() + }) +} + +// VDPAGetDevConfigList wraps netlink.VDPAGetDevConfigList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func VDPAGetDevConfigList() ([]*netlink.VDPADevConfig, error) { + return WithRetryResult(func() ([]*netlink.VDPADevConfig, error) { + return netlink.VDPAGetDevConfigList() + }) +} + +// VDPAGetMGMTDevList wraps netlink.VDPAGetMGMTDevList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func VDPAGetMGMTDevList() ([]*netlink.VDPAMGMTDev, error) { + return WithRetryResult(func() ([]*netlink.VDPAMGMTDev, error) { + return netlink.VDPAGetMGMTDevList() + }) +} + +// XfrmPolicyList wraps netlink.XfrmPolicyList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func XfrmPolicyList(family int) ([]netlink.XfrmPolicy, error) { + return WithRetryResult(func() ([]netlink.XfrmPolicy, error) { + return netlink.XfrmPolicyList(family) + }) +} + +// XfrmStateList wraps netlink.XfrmStateList, but retries the call automatically +// if 
netlink.ErrDumpInterrupted is returned +func XfrmStateList(family int) ([]netlink.XfrmState, error) { + return WithRetryResult(func() ([]netlink.XfrmState, error) { + return netlink.XfrmStateList(family) + }) +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/safenetlink/netlink_unspecified.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/safenetlink/netlink_unspecified.go new file mode 100644 index 0000000000..046c03f99e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/safenetlink/netlink_unspecified.go @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +//go:build !linux + +// This file duplicates the stubs that exist in vishvananda/netlink outside the linux build. Not all +// functions defined in found in netlink_linux.go are present here, because not all have a stub in +// vishvananda/netlink, and thus some of the necessary function signature types are missing outside +// the linux build. + +package safenetlink + +import ( + "net" + + "github.com/vishvananda/netlink" +) + +func AddrList(link netlink.Link, family int) ([]netlink.Addr, error) { + return nil, netlink.ErrNotImplemented +} + +func ChainList(link netlink.Link, parent uint32) ([]netlink.Chain, error) { + return nil, netlink.ErrNotImplemented +} + +func ClassList(link netlink.Link, parent uint32) ([]netlink.Class, error) { + return nil, netlink.ErrNotImplemented +} + +func ConntrackTableList(table netlink.ConntrackTableType, family netlink.InetFamily) ([]*netlink.ConntrackFlow, error) { + return nil, netlink.ErrNotImplemented +} + +func FilterList(link netlink.Link, parent uint32) ([]netlink.Filter, error) { + return nil, netlink.ErrNotImplemented +} + +func FouList(fam int) ([]netlink.Fou, error) { + return nil, netlink.ErrNotImplemented +} + +func GenlFamilyList() ([]*netlink.GenlFamily, error) { + return nil, netlink.ErrNotImplemented +} + +func LinkByName(name string) (netlink.Link, error) { + return nil, netlink.ErrNotImplemented +} + +func LinkByAlias(alias string) (netlink.Link, error) { + return nil, netlink.ErrNotImplemented +} + +func LinkList() ([]netlink.Link, error) { + return nil, netlink.ErrNotImplemented +} + +func NeighList(linkIndex, family int) ([]netlink.Neigh, error) { + return nil, netlink.ErrNotImplemented +} + +func NeighProxyList(linkIndex, family int) ([]netlink.Neigh, error) { + return nil, netlink.ErrNotImplemented +} + +func LinkGetProtinfo(link netlink.Link) (netlink.Protinfo, error) { + return netlink.Protinfo{}, netlink.ErrNotImplemented +} + +func QdiscList(link netlink.Link) ([]netlink.Qdisc, error) { + return nil, netlink.ErrNotImplemented +} + +func RdmaLinkDel(name string) error { + return netlink.ErrNotImplemented +} + +func RouteList(link netlink.Link, family int) ([]netlink.Route, error) { + return nil, netlink.ErrNotImplemented +} + +func RouteListFiltered(family int, filter *netlink.Route, filterMask uint64) ([]netlink.Route, error) { + return nil, netlink.ErrNotImplemented +} + +func RouteListFilteredIter(family int, filter *netlink.Route, filterMask uint64, f func(netlink.Route) (cont bool)) error { + return netlink.ErrNotImplemented +} + +func RuleList(family int) ([]netlink.Rule, error) { + return nil, netlink.ErrNotImplemented +} + +func RuleListFiltered(family int, filter *netlink.Rule, filterMask uint64) ([]netlink.Rule, error) { + return nil, netlink.ErrNotImplemented +} + +func SocketGet(local, remote net.Addr) (*netlink.Socket, error) { + return nil, netlink.ErrNotImplemented +} + +func 
SocketDiagTCPInfo(family uint8) ([]*netlink.InetDiagTCPInfoResp, error) { + return nil, netlink.ErrNotImplemented +} + +func SocketDiagTCP(family uint8) ([]*netlink.Socket, error) { + return nil, netlink.ErrNotImplemented +} + +func SocketDiagUDPInfo(family uint8) ([]*netlink.InetDiagUDPInfoResp, error) { + return nil, netlink.ErrNotImplemented +} + +func SocketDiagUDP(family uint8) ([]*netlink.Socket, error) { + return nil, netlink.ErrNotImplemented +} + +func UnixSocketDiagInfo() ([]*netlink.UnixDiagInfoResp, error) { + return nil, netlink.ErrNotImplemented +} + +func UnixSocketDiag() ([]*netlink.UnixSocket, error) { + return nil, netlink.ErrNotImplemented +} + +func SocketXDPGetInfo(ino uint32, cookie uint64) (*netlink.XDPDiagInfoResp, error) { + return nil, netlink.ErrNotImplemented +} + +func SocketDiagXDP() ([]*netlink.XDPDiagInfoResp, error) { + return nil, netlink.ErrNotImplemented +} + +func XfrmPolicyList(family int) ([]netlink.XfrmPolicy, error) { + return nil, netlink.ErrNotImplemented +} + +func XfrmStateList(family int) ([]netlink.XfrmState, error) { + return nil, netlink.ErrNotImplemented +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/tunnel/tunnel.go b/vendor/github.com/cilium/cilium/pkg/datapath/tunnel/tunnel.go index c914a411f8..54b2f176cd 100644 --- a/vendor/github.com/cilium/cilium/pkg/datapath/tunnel/tunnel.go +++ b/vendor/github.com/cilium/cilium/pkg/datapath/tunnel/tunnel.go @@ -9,9 +9,9 @@ import ( "github.com/cilium/hive/cell" "github.com/spf13/pflag" - "github.com/vishvananda/netlink" dpcfgdef "github.com/cilium/cilium/pkg/datapath/linux/config/defines" + "github.com/cilium/cilium/pkg/datapath/linux/safenetlink" "github.com/cilium/cilium/pkg/defaults" ) @@ -162,7 +162,7 @@ func (cfg Config) datapathConfigProvider() (dpcfgdef.NodeOut, dpcfgdef.NodeFnOut defines["TUNNEL_PORT"] = fmt.Sprintf("%d", cfg.Port()) definesFn = func() (dpcfgdef.Map, error) { - tunnelDev, err := netlink.LinkByName(cfg.DeviceName()) + tunnelDev, err := safenetlink.LinkByName(cfg.DeviceName()) if err != nil { return nil, fmt.Errorf("failed to retrieve device info for %q: %w", cfg.DeviceName(), err) } diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/loader.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/loader.go index 386fee101f..f4fafb5eec 100644 --- a/vendor/github.com/cilium/cilium/pkg/datapath/types/loader.go +++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/loader.go @@ -40,7 +40,7 @@ type PreFilter interface { // Proxy is any type which installs rules related to redirecting traffic to // a proxy. type Proxy interface { - ReinstallRoutingRules() error + ReinstallRoutingRules(mtu int) error } // IptablesManager manages iptables rules. diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/node.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/node.go index ecddfa02c8..9b741a138b 100644 --- a/vendor/github.com/cilium/cilium/pkg/datapath/types/node.go +++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/node.go @@ -174,6 +174,10 @@ type LocalNodeConfiguration struct { // XDPConfig holds configuration options to determine how the node should // handle XDP programs. XDPConfig xdp.Config + + // RoutingMode is the current routing mode of the local node. + // Can be 'native' or 'tunnel'. 
+ RoutingMode string } func (cfg *LocalNodeConfiguration) DeviceNames() []string { diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/zz_generated.deepequal.go index c0b1bc0895..f4b908812e 100644 --- a/vendor/github.com/cilium/cilium/pkg/datapath/types/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/zz_generated.deepequal.go @@ -255,5 +255,9 @@ func (in *LocalNodeConfiguration) DeepEqual(other *LocalNodeConfiguration) bool return false } + if in.RoutingMode != other.RoutingMode { + return false + } + return true } diff --git a/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go b/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go index f73cfded0b..995ea59f8f 100644 --- a/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go +++ b/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go @@ -72,43 +72,9 @@ const ( // SockPathEnv is the environment variable to overwrite SockPath SockPathEnv = "CILIUM_SOCK" - // HubbleSockPath is the path to the UNIX domain socket exposing the Hubble - // API to clients locally. - HubbleSockPath = RuntimePath + "/hubble.sock" - - // HubbleSockPathEnv is the environment variable to overwrite - // HubbleSockPath. - HubbleSockPathEnv = "HUBBLE_SOCK" - - // HubbleRecorderStoragePath specifies the directory in which pcap files - // created via the Hubble Recorder API are stored - HubbleRecorderStoragePath = RuntimePath + "/pcaps" - - // HubbleRecorderSinkQueueSize is the queue size for each recorder sink - HubbleRecorderSinkQueueSize = 1024 - - // HubbleRedactEnabled controls if sensitive information will be redacted from L7 flows - HubbleRedactEnabled = false - - // HubbleRedactHttpURLQuery controls if the URL query will be redacted from flows - HubbleRedactHttpURLQuery = false - - // HubbleRedactHttpUserInfo controls if the user info will be redacted from flows - HubbleRedactHttpUserInfo = true - - // HubbleRedactKafkaApiKey controls if the Kafka API key will be redacted from flows - HubbleRedactKafkaApiKey = false - - // HubbleDropEventsEnabled controls whether Hubble should create v1.Events - // for packet drops related to pods - HubbleDropEventsEnabled = false - - // HubbleDropEventsInterval controls the minimum time between emitting events - // with the same source and destination IP - HubbleDropEventsInterval = 2 * time.Minute - - // HubbleDropEventsReasons controls which drop reasons to emit events for - HubbleDropEventsReasons = "auth_required,policy_denied" + // ShellSockPath is the path to the UNIX domain socket exposing the debug shell + // to which "cilium-dbg shell" connects to. + ShellSockPath = RuntimePath + "/shell.sock" // MonitorSockPath1_2 is the path to the UNIX domain socket used to // distribute BPF and agent events to listeners. @@ -571,6 +537,13 @@ const ( // EnableK8sNetworkPolicy enables support for K8s NetworkPolicy. EnableK8sNetworkPolicy = true + // EnableCiliumNetworkPolicy enables support for Cilium Network Policy. + EnableCiliumNetworkPolicy = true + + // EnableCiliumClusterwideNetworkPolicy enables support for Cilium Clusterwide + // Network Policy. + EnableCiliumClusterwideNetworkPolicy = true + // MaxConnectedClusters sets the maximum number of clusters that can be // connected in a clustermesh. 
// The value is used to determine the bit allocation for cluster ID and @@ -590,8 +563,8 @@ const ( // BPFEventsTraceEnabled controls whether the Cilium datapath exposes "trace" events to Cilium monitor and Hubble. BPFEventsTraceEnabled = true - // BPFConntrackAccountingEnabled controls whether CT accounting for packets and bytes is enabled - BPFConntrackAccountingEnabled = false + // BPFConntrackAccounting controls whether CT accounting for packets and bytes is enabled + BPFConntrackAccounting = false // EnableEnvoyConfig is the default value for option.EnableEnvoyConfig EnableEnvoyConfig = false @@ -604,6 +577,9 @@ const ( // EnableNonDefaultDenyPolicies allows policies to define whether they are operating in default-deny mode EnableNonDefaultDenyPolicies = true + + // EnableSourceIPVerification is the default value for source ip validation + EnableSourceIPVerification = true ) var ( diff --git a/vendor/github.com/cilium/cilium/pkg/health/client/client.go b/vendor/github.com/cilium/cilium/pkg/health/client/client.go index 2867cfaaa6..c30e1b25f4 100644 --- a/vendor/github.com/cilium/cilium/pkg/health/client/client.go +++ b/vendor/github.com/cilium/cilium/pkg/health/client/client.go @@ -320,63 +320,88 @@ func GetAllEndpointAddresses(node *models.NodeStatus) []*models.PathStatus { return append([]*models.PathStatus{node.HealthEndpoint.PrimaryAddress}, node.HealthEndpoint.SecondaryAddresses...) } -func formatNodeStatus(w io.Writer, node *models.NodeStatus, printAll, succinct, verbose, localhost bool) { +func formatNodeStatus(w io.Writer, node *models.NodeStatus, allNodes, verbose, localhost bool) bool { localStr := "" if localhost { localStr = " (localhost)" } - if succinct { - if printAll || !nodeIsHealthy(node) { - ips := []string{getPrimaryAddressIP(node)} - for _, addr := range GetHostSecondaryAddresses(node) { - if addr == nil { - continue - } - ips = append(ips, addr.IP) - } - hostStatuses := SummarizePathConnectivityStatusType(GetAllHostAddresses(node)) - endpointStatuses := SummarizePathConnectivityStatusType(GetAllEndpointAddresses(node)) - fmt.Fprintf(w, " %s%s\t%s\t%d/%d", node.Name, localStr, strings.Join(ips, ","), hostStatuses[ConnStatusReachable], len(GetAllHostAddresses(node))) - if hostStatuses[ConnStatusUnknown] > 0 { - fmt.Fprintf(w, " (%d unknown)", hostStatuses[ConnStatusUnknown]) - } - fmt.Fprintf(w, "\t%d/%d", endpointStatuses[ConnStatusReachable], len(GetAllEndpointAddresses(node))) - if endpointStatuses[ConnStatusUnknown] > 0 { - fmt.Fprintf(w, " (%d unknown)", endpointStatuses[ConnStatusUnknown]) - } - fmt.Fprintf(w, "\n") - } - } else { + if verbose { fmt.Fprintf(w, " %s%s:\n", node.Name, localStr) formatPathStatus(w, "Host", GetHostPrimaryAddress(node), " ", verbose) unhealthyPaths := !allPathsAreHealthyOrUnknown(GetHostSecondaryAddresses(node)) if (verbose || unhealthyPaths) && node.Host != nil { for _, addr := range node.Host.SecondaryAddresses { - formatPathStatus(w, "Secondary", addr, " ", verbose) + formatPathStatus(w, "Secondary Host", addr, " ", verbose) } } formatPathStatus(w, "Endpoint", GetEndpointPrimaryAddress(node), " ", verbose) unhealthyPaths = !allPathsAreHealthyOrUnknown(GetEndpointSecondaryAddresses(node)) if (verbose || unhealthyPaths) && node.HealthEndpoint != nil { for _, addr := range node.HealthEndpoint.SecondaryAddresses { - formatPathStatus(w, "Secondary", addr, " ", verbose) + formatPathStatus(w, "Secondary Endpoint", addr, " ", verbose) + } + } + return true + } + + hostStatuses := 
SummarizePathConnectivityStatusType(GetAllHostAddresses(node)) + endpointStatuses := SummarizePathConnectivityStatusType(GetAllEndpointAddresses(node)) + + if !nodeIsHealthy(node) { + ips := []string{getPrimaryAddressIP(node)} + for _, addr := range GetHostSecondaryAddresses(node) { + if addr == nil { + continue + } + ips = append(ips, addr.IP) + } + fmt.Fprintf(w, " %s%s\t%s\t%d/%d", node.Name, localStr, strings.Join(ips, ","), hostStatuses[ConnStatusReachable], len(GetAllHostAddresses(node))) + if hostStatuses[ConnStatusUnknown] > 0 { + fmt.Fprintf(w, " (%d unknown)", hostStatuses[ConnStatusUnknown]) + } + fmt.Fprintf(w, "\t%d/%d", endpointStatuses[ConnStatusReachable], len(GetAllEndpointAddresses(node))) + if endpointStatuses[ConnStatusUnknown] > 0 { + fmt.Fprintf(w, " (%d unknown)", endpointStatuses[ConnStatusUnknown]) + } + fmt.Fprintf(w, "\n") + return true + } + + if allNodes { + ips := []string{getPrimaryAddressIP(node)} + for _, addr := range GetHostSecondaryAddresses(node) { + if addr == nil { + continue } + ips = append(ips, addr.IP) + } + fmt.Fprintf(w, " %s%s\t%s\t%d/%d", node.Name, localStr, strings.Join(ips, ","), hostStatuses[ConnStatusReachable], len(GetAllHostAddresses(node))) + if hostStatuses[ConnStatusUnknown] > 0 { + fmt.Fprintf(w, " (%d unknown)", hostStatuses[ConnStatusUnknown]) } + fmt.Fprintf(w, "\t%d/%d", endpointStatuses[ConnStatusReachable], len(GetAllEndpointAddresses(node))) + if endpointStatuses[ConnStatusUnknown] > 0 { + fmt.Fprintf(w, " (%d unknown)", endpointStatuses[ConnStatusUnknown]) + } + fmt.Fprintf(w, "\n") + return true } + + return false } // FormatHealthStatusResponse writes a HealthStatusResponse as a string to the // writer. // -// 'printAll', if true, causes all nodes to be printed regardless of status -// 'succinct', if true, causes node health to be output as one line per node -// 'verbose', if true, overrides 'succinct' and prints all information +// 'allNodes', if true, causes all nodes to be printed regardless of status +// 'verbose', if true, prints all information // 'maxLines', if nonzero, determines the maximum number of lines to print -func FormatHealthStatusResponse(w io.Writer, sr *models.HealthStatusResponse, printAll, succinct, verbose bool, maxLines int) { +func FormatHealthStatusResponse(w io.Writer, sr *models.HealthStatusResponse, allNodes bool, verbose bool, maxLines int) { var ( - healthy int - localhost *models.NodeStatus + healthy int + localhost *models.NodeStatus + printedLines int ) for _, node := range sr.Nodes { if nodeIsHealthy(node) { @@ -386,37 +411,35 @@ func FormatHealthStatusResponse(w io.Writer, sr *models.HealthStatusResponse, pr localhost = node } } - if succinct { - fmt.Fprintf(w, "Cluster health:\t%d/%d reachable\t(%s)\n", - healthy, len(sr.Nodes), sr.Timestamp) - if printAll || healthy < len(sr.Nodes) { - fmt.Fprintf(w, " Name\tIP\tNode\tEndpoints\n") - } - } else { - fmt.Fprintf(w, "Probe time:\t%s\n", sr.Timestamp) - fmt.Fprintf(w, "Nodes:\n") - } + + fmt.Fprintf(w, "Cluster health:\t%d/%d reachable\t(%s)\n", + healthy, len(sr.Nodes), sr.Timestamp) + + fmt.Fprintf(w, "Name\tIP\tNode\tEndpoints\n") if localhost != nil { - formatNodeStatus(w, localhost, printAll, succinct, verbose, true) - maxLines-- + if formatNodeStatus(w, localhost, allNodes, verbose, true) { + printedLines++ + } } nodes := sr.Nodes sort.Slice(nodes, func(i, j int) bool { return strings.Compare(nodes[i].Name, nodes[j].Name) < 0 }) - for n, node := range nodes { - if maxLines > 0 && n > maxLines { + for _, node := range nodes { + if 
printedLines == maxLines { break } if node == localhost { continue } - formatNodeStatus(w, node, printAll, succinct, verbose, false) + if formatNodeStatus(w, node, allNodes, verbose, false) { + printedLines++ + } } - if maxLines > 0 && len(sr.Nodes)-healthy > maxLines { - fmt.Fprintf(w, " ...") + if len(sr.Nodes)-printedLines-healthy > 0 { + fmt.Fprintf(w, " ...\n") } } @@ -424,9 +447,9 @@ func FormatHealthStatusResponse(w io.Writer, sr *models.HealthStatusResponse, pr // daemon via the default channel and formats its output as a string to the // writer. // -// 'succinct', 'verbose' and 'maxLines' are handled the same as in +// 'verbose' and 'maxLines' are handled the same as in // FormatHealthStatusResponse(). -func GetAndFormatHealthStatus(w io.Writer, succinct, verbose bool, maxLines int) { +func GetAndFormatHealthStatus(w io.Writer, allNodes bool, verbose bool, maxLines int) { client, err := NewClient("") if err != nil { fmt.Fprintf(w, "Cluster health:\t\t\tClient error: %s\n", err) @@ -438,5 +461,5 @@ func GetAndFormatHealthStatus(w io.Writer, succinct, verbose bool, maxLines int) fmt.Fprintf(w, "Cluster health:\t\t\tWarning\tcilium-health daemon unreachable\n") return } - FormatHealthStatusResponse(w, hr.Payload, verbose, succinct, verbose, maxLines) + FormatHealthStatusResponse(w, hr.Payload, allNodes, verbose, maxLines) } diff --git a/vendor/github.com/cilium/cilium/pkg/hive/feature_lifecycle.go b/vendor/github.com/cilium/cilium/pkg/hive/feature_lifecycle.go deleted file mode 100644 index 2b95718301..0000000000 --- a/vendor/github.com/cilium/cilium/pkg/hive/feature_lifecycle.go +++ /dev/null @@ -1,128 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package hive - -import ( - "context" - "errors" - "fmt" - "log/slog" - "maps" - "slices" - - "github.com/cilium/hive/cell" - - "github.com/cilium/cilium/pkg/lock" -) - -type FeatureLifecycleInterface interface { - Append(Feature, cell.Hook) error - Start(Feature, context.Context, *slog.Logger) error - Stop(Feature, context.Context, *slog.Logger) error - - IsRunning(Feature) bool - List() []Feature -} - -type Feature string -type FeatureLifecycle struct { - mu lock.Mutex - hooks map[Feature][]cell.Hook - status map[Feature]bool -} - -func NewFeatureLifecycle() *FeatureLifecycle { - return &FeatureLifecycle{ - hooks: make(map[Feature][]cell.Hook), - status: make(map[Feature]bool), - } -} - -// Append adds a hook to the feature hooks, marking the feature as not running. -// It returns an error if the feature is already running. -func (fl *FeatureLifecycle) Append(f Feature, h cell.Hook) error { - fl.mu.Lock() - defer fl.mu.Unlock() - - if status, ok := fl.status[f]; ok && status { - return fmt.Errorf("cannot add hooks to a running feature: %s", f) - } - - fl.hooks[f] = append(fl.hooks[f], h) - fl.status[f] = false - - return nil -} - -// Start attempts to start a feature by executing its associated hooks. -// It returns an error if the feature is already running -// or if any hook fails to start. 
-func (fl *FeatureLifecycle) Start(f Feature, c context.Context, l *slog.Logger) error { - fl.mu.Lock() - defer fl.mu.Unlock() - - if status, ok := fl.status[f]; ok && status { - return fmt.Errorf("feature %s is already running", f) - } - - l.Debug("Starting hooks for", "feature", f) - for _, hook := range fl.hooks[f] { - if err := hook.Start(c); err != nil { - l.Error("Start hook failed", "error", err) - return fmt.Errorf("starting hook for feature %s: %w", f, err) - } - } - - fl.status[f] = true - - return nil -} - -// Stop attempts to stop a feature by stopping its associated hooks. -// It returns an error if the feature is already stopped. -// If any hook encounters an error during stopping it aggregates into return error -func (fl *FeatureLifecycle) Stop(f Feature, c context.Context, l *slog.Logger) error { - fl.mu.Lock() - defer fl.mu.Unlock() - - if status, ok := fl.status[f]; ok && !status { - return fmt.Errorf("feature %s is already stopped", f) - } - - var errs error - - l.Debug("Stopping hooks for", "feature", f) - - for i := len(fl.hooks[f]) - 1; i >= 0; i-- { - hook := fl.hooks[f][i] - if err := hook.Stop(c); err != nil { - l.Error("Stop hook failed", "error", err) - errs = errors.Join(errs, err) - } - } - - fl.status[f] = false - - return errs -} - -// IsRunning checks if a feature is currently running. -// It returns true if the feature exists in status map -// and its status is true, and false otherwise. -func (fl *FeatureLifecycle) IsRunning(f Feature) bool { - fl.mu.Lock() - defer fl.mu.Unlock() - - status, ok := fl.status[f] - return ok && status - -} - -// List returns a list of all features registered -func (fl *FeatureLifecycle) List() []Feature { - fl.mu.Lock() - defer fl.mu.Unlock() - - return slices.Collect(maps.Keys(fl.hooks)) -} diff --git a/vendor/github.com/cilium/cilium/pkg/hive/health/metrics.go b/vendor/github.com/cilium/cilium/pkg/hive/health/metrics.go index b1f24738f4..1e65a15dce 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/health/metrics.go +++ b/vendor/github.com/cilium/cilium/pkg/hive/health/metrics.go @@ -18,7 +18,8 @@ import ( ) type Metrics struct { - HealthStatusGauge metric.Vec[metric.Gauge] + HealthStatusGauge metric.Vec[metric.Gauge] + DegradedHealthStatusGauge metric.DeletableVec[metric.Gauge] } func newMetrics() *Metrics { @@ -30,10 +31,18 @@ func newMetrics() *Metrics { Name: "status", Help: "Counts of health status levels of Hive components", }, []string{"status"}), + + DegradedHealthStatusGauge: metric.NewGaugeVec(metric.GaugeOpts{ + Namespace: "cilium", + Subsystem: "hive", + Name: "degraded_status", + Help: "Counts degraded health status levels of Hive components labeled by modules", + Disabled: true, + }, []string{"module"}), } } -type publishFunc func(map[types.Level]uint64) +type publishFunc func(map[types.Level]uint64, map[string]uint64) type metricPublisherParams struct { cell.In @@ -44,13 +53,24 @@ type metricPublisherParams struct { Metrics *Metrics } -// metricPublisher periodically publishes the hive module health metric (hive_health_status_levels). +// metricPublisher periodically publishes the hive module health metric +// * cilium_hive_status +// * cilium_hive_degraded_status func metricPublisher(p metricPublisherParams) { // Performs the actual writing to the metric. Extracted to make testing easy. 
- publish := func(stats map[types.Level]uint64) { + publish := func(stats map[types.Level]uint64, degradedModuleCount map[string]uint64) { for l, v := range stats { p.Metrics.HealthStatusGauge.WithLabelValues(strings.ToLower(string(l))).Set(float64(v)) } + for k, v := range degradedModuleCount { + // If the module is healthy attempt to remove any associated metrics with that module + if v == 0 { + p.Metrics.DegradedHealthStatusGauge.DeleteLabelValues(k) + continue + } + + p.Metrics.DegradedHealthStatusGauge.WithLabelValues(k).Set(float64(v)) + } } if p.Metrics.HealthStatusGauge.IsEnabled() { @@ -68,13 +88,34 @@ func publishJob(ctx context.Context, p metricPublisherParams, publish publishFun limiter := rate.NewLimiter(15*time.Second, 3) defer limiter.Stop() // Avoids leaking a goroutine. + idToStatus := make(map[string]uint64) it, watch := p.Table.AllWatch(p.DB.ReadTxn()) for { stats := make(map[types.Level]uint64) + // Reset health ID status counts + for k := range idToStatus { + idToStatus[k] = 0 + } + for obj := range it { stats[obj.Level]++ + + _, ok := idToStatus[obj.ID.Module.String()] + if obj.Level == types.LevelDegraded { + idToStatus[obj.ID.Module.String()]++ + } else if !ok { + idToStatus[obj.ID.Module.String()] = 0 + } + } + + publish(stats, idToStatus) + + // Removed old IDs + for k, v := range idToStatus { + if v == 0 { + delete(idToStatus, k) + } } - publish(stats) select { case <-ctx.Done(): diff --git a/vendor/github.com/cilium/cilium/pkg/hive/hive.go b/vendor/github.com/cilium/cilium/pkg/hive/hive.go index ffeb583cc4..82397c860d 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/hive.go +++ b/vendor/github.com/cilium/cilium/pkg/hive/hive.go @@ -16,11 +16,14 @@ import ( "github.com/cilium/statedb" "github.com/sirupsen/logrus" + flowpb "github.com/cilium/cilium/api/v1/flow" "github.com/cilium/cilium/pkg/cidr" "github.com/cilium/cilium/pkg/hive/health" "github.com/cilium/cilium/pkg/hive/health/types" + "github.com/cilium/cilium/pkg/hubble" "github.com/cilium/cilium/pkg/logging" "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/metrics" ) type ( @@ -40,18 +43,36 @@ func New(cells ...cell.Cell) *Hive { cells = append( slices.Clone(cells), - health.Cell, job.Cell, - statedb.Cell, + // Module health + cell.Group( + health.Cell, + cell.Provide( + func(provider types.Provider) cell.Health { + return provider.ForModule(nil) + }, + ), + ), + + // StateDB and its metrics + cell.Group( + statedb.Cell, + + metrics.Metric(NewStateDBMetrics), + metrics.Metric(NewStateDBReconcilerMetrics), + cell.Provide( + NewStateDBMetricsImpl, + NewStateDBReconcilerMetricsImpl, + ), + ), + + // The root logrus FieldLogger. cell.Provide( - NewStateDBMetrics, - NewStateDBReconcilerMetrics, func() logrus.FieldLogger { return logging.DefaultLogger }, - func(provider types.Provider) cell.Health { - return provider.ForModule(nil) - }, - )) + ), + ) + // Scope logging and health by module ID. moduleDecorators := []cell.ModuleDecorator{ func(log logrus.FieldLogger, mid cell.ModuleID) logrus.FieldLogger { @@ -93,6 +114,20 @@ var decodeHooks = cell.DecodeHooks{ } return cidr.ParseCIDR(s) }, + // Decode JSON encoded *flowpb.FlowFilter fields + func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { + if from.Kind() != reflect.Slice { + return data, nil + } + xs, ok := data.([]string) + if !ok { + return data, nil + } + if to != reflect.TypeOf(([]*flowpb.FlowFilter)(nil)) { + return data, nil + } + return hubble.ParseFlowFilters(xs...) 
+ }, } func AddConfigOverride[Cfg cell.Flagger](h *Hive, override func(*Cfg)) { diff --git a/vendor/github.com/cilium/cilium/pkg/hive/reconciler_metrics.go b/vendor/github.com/cilium/cilium/pkg/hive/reconciler_metrics.go index f58b3ae069..9101d2eb82 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/reconciler_metrics.go +++ b/vendor/github.com/cilium/cilium/pkg/hive/reconciler_metrics.go @@ -25,76 +25,73 @@ type ReconcilerMetrics struct { } const ( - LabelModuleId = "module_id" - LabelOperation = "op" + labelModuleId = "module_id" + labelOperation = "op" ) -func NewStateDBReconcilerMetrics() (ReconcilerMetrics, reconciler.Metrics) { +func NewStateDBReconcilerMetrics() ReconcilerMetrics { m := ReconcilerMetrics{ ReconciliationCount: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: metrics.Namespace + "_count", - Disabled: true, - Namespace: metrics.Namespace, - Subsystem: "reconciler", - Name: "count", - Help: "Number of reconciliation rounds performed", - }, []string{LabelModuleId}), + Disabled: true, + Namespace: metrics.Namespace, + Subsystem: "reconciler", + Name: "count", + Help: "Number of reconciliation rounds performed", + }, []string{labelModuleId}), ReconciliationDuration: metric.NewHistogramVec(metric.HistogramOpts{ - ConfigName: metrics.Namespace + "_reconciler_duration_seconds", - Disabled: true, - Namespace: metrics.Namespace, - Subsystem: "reconciler", - Name: "duration_seconds", - Help: "Histogram of per-operation duration during reconciliation", - }, []string{LabelModuleId, LabelOperation}), + Disabled: true, + Namespace: metrics.Namespace, + Subsystem: "reconciler", + Name: "duration_seconds", + Help: "Histogram of per-operation duration during reconciliation", + }, []string{labelModuleId, labelOperation}), ReconciliationTotalErrors: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: metrics.Namespace + "_reconciler_errors_total", - Disabled: true, - Namespace: metrics.Namespace, - Subsystem: "reconciler", - Name: "errors_total", - Help: "Total number of errors encountered during reconciliation", - }, []string{LabelModuleId}), + Disabled: true, + Namespace: metrics.Namespace, + Subsystem: "reconciler", + Name: "errors_total", + Help: "Total number of errors encountered during reconciliation", + }, []string{labelModuleId}), ReconciliationCurrentErrors: metric.NewGaugeVec(metric.GaugeOpts{ - ConfigName: metrics.Namespace + "_reconciler_errors_current", - Disabled: true, - Namespace: metrics.Namespace, - Subsystem: "reconciler", - Name: "errors_current", - Help: "The number of objects currently failing to be reconciled", - }, []string{LabelModuleId}), + Disabled: true, + Namespace: metrics.Namespace, + Subsystem: "reconciler", + Name: "errors_current", + Help: "The number of objects currently failing to be reconciled", + }, []string{labelModuleId}), PruneCount: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: metrics.Namespace + "_reconciler_prune_count", - Disabled: true, - Namespace: metrics.Namespace, - Subsystem: "reconciler", - Name: "prune_count", - Help: "Number of prunes performed", - }, []string{LabelModuleId}), + Disabled: true, + Namespace: metrics.Namespace, + Subsystem: "reconciler", + Name: "prune_count", + Help: "Number of prunes performed", + }, []string{labelModuleId}), PruneTotalErrors: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: metrics.Namespace + "_reconciler_prune_errors_total", - Disabled: true, - Namespace: metrics.Namespace, - Subsystem: "reconciler", - Name: "full_errors_total", - Help: "Total number of errors 
encountered during full reconciliation", - }, []string{LabelModuleId}), + Disabled: true, + Namespace: metrics.Namespace, + Subsystem: "reconciler", + Name: "prune_errors_total", + Help: "Total number of errors encountered during pruning", + }, []string{labelModuleId}), PruneDuration: metric.NewHistogramVec(metric.HistogramOpts{ - ConfigName: metrics.Namespace + "_reconciler_prune_duration_seconds", - Disabled: true, - Namespace: metrics.Namespace, - Subsystem: "reconciler", - Name: "full_duration_seconds", - Help: "Histogram of per-operation duration during full reconciliation", - }, []string{LabelModuleId, LabelOperation}), + Disabled: true, + Namespace: metrics.Namespace, + Subsystem: "reconciler", + Name: "prune_duration_seconds", + Help: "Histogram of pruning duration", + }, []string{labelModuleId}), } - return m, &reconcilerMetricsImpl{m} + return m +} + +func NewStateDBReconcilerMetricsImpl(m ReconcilerMetrics) reconciler.Metrics { + return &reconcilerMetricsImpl{m} } type reconcilerMetricsImpl struct { @@ -104,7 +101,7 @@ type reconcilerMetricsImpl struct { // PruneDuration implements reconciler.Metrics. func (m *reconcilerMetricsImpl) PruneDuration(moduleID cell.FullModuleID, duration time.Duration) { if m.m.PruneDuration.IsEnabled() { - m.m.PruneDuration.WithLabelValues(LabelModuleId, moduleID.String()). + m.m.PruneDuration.WithLabelValues(moduleID.String()). Observe(duration.Seconds()) } } @@ -112,9 +109,9 @@ func (m *reconcilerMetricsImpl) PruneDuration(moduleID cell.FullModuleID, durati // FullReconciliationErrors implements reconciler.Metrics. func (m *reconcilerMetricsImpl) PruneError(moduleID cell.FullModuleID, err error) { if m.m.PruneCount.IsEnabled() { - m.m.PruneCount.WithLabelValues(LabelModuleId, moduleID.String()) + m.m.PruneCount.WithLabelValues(moduleID.String()).Inc() } - if m.m.PruneTotalErrors.IsEnabled() { + if err != nil && m.m.PruneTotalErrors.IsEnabled() { m.m.PruneTotalErrors.WithLabelValues(moduleID.String()).Add(1) } } @@ -122,10 +119,10 @@ func (m *reconcilerMetricsImpl) PruneError(moduleID cell.FullModuleID, err error // ReconciliationDuration implements reconciler.Metrics. func (m *reconcilerMetricsImpl) ReconciliationDuration(moduleID cell.FullModuleID, operation string, duration time.Duration) { if m.m.ReconciliationCount.IsEnabled() { - m.m.ReconciliationCount.WithLabelValues(LabelModuleId, moduleID.String()).Inc() + m.m.ReconciliationCount.WithLabelValues(moduleID.String()).Inc() } if m.m.ReconciliationDuration.IsEnabled() { - m.m.ReconciliationDuration.WithLabelValues(LabelModuleId, moduleID.String(), LabelOperation, operation). + m.m.ReconciliationDuration.WithLabelValues(moduleID.String(), operation). Observe(duration.Seconds()) } } @@ -133,10 +130,10 @@ func (m *reconcilerMetricsImpl) ReconciliationDuration(moduleID cell.FullModuleI // ReconciliationErrors implements reconciler.Metrics. 
func (m *reconcilerMetricsImpl) ReconciliationErrors(moduleID cell.FullModuleID, new, current int) { if m.m.ReconciliationCurrentErrors.IsEnabled() { - m.m.ReconciliationCurrentErrors.WithLabelValues(LabelModuleId, moduleID.String()).Set(float64(current)) + m.m.ReconciliationCurrentErrors.WithLabelValues(moduleID.String()).Set(float64(current)) } if m.m.ReconciliationTotalErrors.IsEnabled() { - m.m.ReconciliationCurrentErrors.WithLabelValues(LabelModuleId, moduleID.String()).Add(float64(new)) + m.m.ReconciliationCurrentErrors.WithLabelValues(moduleID.String()).Add(float64(new)) } } diff --git a/vendor/github.com/cilium/cilium/pkg/hive/statedb_metrics.go b/vendor/github.com/cilium/cilium/pkg/hive/statedb_metrics.go index a9e86ffb1b..3e764604f9 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/statedb_metrics.go +++ b/vendor/github.com/cilium/cilium/pkg/hive/statedb_metrics.go @@ -34,6 +34,12 @@ type StateDBMetrics struct { TableGraveyardCleaningDuration metric.Vec[metric.Observer] } +const ( + labelTable = "table" + labelTables = "tables" + labelHandle = "handle" +) + type stateDBMetricsImpl struct { m StateDBMetrics } @@ -41,42 +47,42 @@ type stateDBMetricsImpl struct { // DeleteTrackerCount implements statedb.Metrics. func (i stateDBMetricsImpl) DeleteTrackerCount(tableName string, numTrackers int) { if i.m.TableDeleteTrackerCount.IsEnabled() { - i.m.TableDeleteTrackerCount.WithLabelValues("table", tableName).Set(float64(numTrackers)) + i.m.TableDeleteTrackerCount.WithLabelValues(tableName).Set(float64(numTrackers)) } } // GraveyardCleaningDuration implements statedb.Metrics. func (i stateDBMetricsImpl) GraveyardCleaningDuration(tableName string, duration time.Duration) { if i.m.TableGraveyardCleaningDuration.IsEnabled() { - i.m.TableGraveyardCleaningDuration.WithLabelValues("table", tableName).Observe(float64(duration.Seconds())) + i.m.TableGraveyardCleaningDuration.WithLabelValues(tableName).Observe(float64(duration.Seconds())) } } // GraveyardLowWatermark implements statedb.Metrics. func (i stateDBMetricsImpl) GraveyardLowWatermark(tableName string, lowWatermark uint64) { if i.m.TableGraveyardLowWatermark.IsEnabled() { - i.m.TableGraveyardLowWatermark.WithLabelValues("table", tableName).Set(float64(lowWatermark)) + i.m.TableGraveyardLowWatermark.WithLabelValues(tableName).Set(float64(lowWatermark)) } } // GraveyardObjectCount implements statedb.Metrics. func (i stateDBMetricsImpl) GraveyardObjectCount(tableName string, numDeletedObjects int) { if i.m.TableGraveyardObjectCount.IsEnabled() { - i.m.TableGraveyardObjectCount.WithLabelValues("table", tableName).Set(float64(numDeletedObjects)) + i.m.TableGraveyardObjectCount.WithLabelValues(tableName).Set(float64(numDeletedObjects)) } } // ObjectCount implements statedb.Metrics. func (i stateDBMetricsImpl) ObjectCount(tableName string, numObjects int) { if i.m.TableObjectCount.IsEnabled() { - i.m.TableObjectCount.WithLabelValues("table", tableName).Set(float64(numObjects)) + i.m.TableObjectCount.WithLabelValues(tableName).Set(float64(numObjects)) } } // Revision implements statedb.Metrics. 
func (i stateDBMetricsImpl) Revision(tableName string, revision uint64) { if i.m.TableRevision.IsEnabled() { - i.m.TableRevision.WithLabelValues("table", tableName).Set(float64(revision)) + i.m.TableRevision.WithLabelValues(tableName).Set(float64(revision)) } } @@ -84,8 +90,7 @@ func (i stateDBMetricsImpl) Revision(tableName string, revision uint64) { func (i stateDBMetricsImpl) WriteTxnDuration(handle string, tables []string, acquire time.Duration) { if i.m.WriteTxnDuration.IsEnabled() { i.m.WriteTxnDuration.WithLabelValues( - "handle", handle, - "tables", strings.Join(tables, ","), + handle, strings.Join(tables, ","), ).Observe(acquire.Seconds()) } } @@ -93,10 +98,7 @@ func (i stateDBMetricsImpl) WriteTxnDuration(handle string, tables []string, acq // WriteTxnTableAcquisition implements statedb.Metrics. func (i stateDBMetricsImpl) WriteTxnTableAcquisition(handle string, tableName string, acquire time.Duration) { if i.m.TableContention.IsEnabled() { - i.m.TableContention.WithLabelValues( - "handle", handle, - "table", tableName, - ) + i.m.TableContention.WithLabelValues(handle, tableName) } } @@ -104,79 +106,82 @@ func (i stateDBMetricsImpl) WriteTxnTableAcquisition(handle string, tableName st func (i stateDBMetricsImpl) WriteTxnTotalAcquisition(handle string, tables []string, acquire time.Duration) { if i.m.WriteTxnAcquisition.IsEnabled() { i.m.WriteTxnAcquisition.WithLabelValues( - "handle", handle, - "tables", strings.Join(tables, ","), + handle, strings.Join(tables, ","), ) } } var _ statedb.Metrics = stateDBMetricsImpl{} -func NewStateDBMetrics() (StateDBMetrics, statedb.Metrics) { +func NewStateDBMetrics() StateDBMetrics { m := StateDBMetrics{ WriteTxnDuration: metric.NewHistogramVec(metric.HistogramOpts{ - Namespace: metrics.CiliumAgentNamespace, + Namespace: metrics.Namespace, Subsystem: "statedb", Name: "write_txn_duration_seconds", Help: "How long a write transaction was held.", Disabled: true, - }, []string{"tables", "handle"}), + }, []string{labelHandle, labelTables}), WriteTxnAcquisition: metric.NewHistogramVec(metric.HistogramOpts{ - Namespace: metrics.CiliumAgentNamespace, + Namespace: metrics.Namespace, Subsystem: "statedb", Name: "write_txn_acquisition_seconds", Help: "How long it took to acquire a write transaction for all tables.", Disabled: true, - }, []string{"tables", "handle"}), + }, []string{labelHandle, labelTables}), TableContention: metric.NewGaugeVec(metric.GaugeOpts{ - Namespace: metrics.CiliumAgentNamespace, + Namespace: metrics.Namespace, Subsystem: "statedb", Name: "table_contention_seconds", Help: "How long writers were blocked while waiting to acquire a write transaction for a specific table.", Disabled: true, - }, []string{"table"}), + }, []string{labelHandle, labelTable}), TableObjectCount: metric.NewGaugeVec(metric.GaugeOpts{ - Namespace: metrics.CiliumAgentNamespace, + Namespace: metrics.Namespace, Subsystem: "statedb", Name: "table_objects", Help: "The amount of objects in a given table.", Disabled: true, - }, []string{"table"}), + }, []string{labelTable}), TableRevision: metric.NewGaugeVec(metric.GaugeOpts{ - Namespace: metrics.CiliumAgentNamespace, + Namespace: metrics.Namespace, Subsystem: "statedb", Name: "table_revision", Help: "The current revision of a given table.", Disabled: true, - }, []string{"table"}), + }, []string{labelTable}), TableDeleteTrackerCount: metric.NewGaugeVec(metric.GaugeOpts{ - Namespace: metrics.CiliumAgentNamespace, + Namespace: metrics.Namespace, Subsystem: "statedb", Name: "table_delete_trackers", Help: "The amount of 
delete trackers for a given table.", Disabled: true, - }, []string{"table"}), + }, []string{labelTable}), TableGraveyardObjectCount: metric.NewGaugeVec(metric.GaugeOpts{ - Namespace: metrics.CiliumAgentNamespace, + Namespace: metrics.Namespace, Subsystem: "statedb", Name: "table_graveyard_objects", Help: "The amount of objects in the graveyard for a given table.", Disabled: true, - }, []string{"table"}), + }, []string{labelTable}), TableGraveyardLowWatermark: metric.NewGaugeVec(metric.GaugeOpts{ - Namespace: metrics.CiliumAgentNamespace, + Namespace: metrics.Namespace, Subsystem: "statedb", Name: "table_graveyard_low_watermark", Help: "The lowest revision of a given table that has been processed by the graveyard garbage collector.", Disabled: true, - }, []string{"table"}), + }, []string{labelTable}), TableGraveyardCleaningDuration: metric.NewHistogramVec(metric.HistogramOpts{ - Namespace: metrics.CiliumAgentNamespace, + Namespace: metrics.Namespace, Subsystem: "statedb", Name: "table_graveyard_cleaning_duration_seconds", Help: "The time it took to clean the graveyard for a given table.", Disabled: true, - }, []string{"table"}), + }, []string{labelTable}), } - return m, stateDBMetricsImpl{m} + return m +} + +func NewStateDBMetricsImpl(m StateDBMetrics) statedb.Metrics { + return stateDBMetricsImpl{m} } diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/helpers.go b/vendor/github.com/cilium/cilium/pkg/hubble/helpers.go new file mode 100644 index 0000000000..69f7fa4279 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hubble/helpers.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package hubble + +import ( + "encoding/json" + "fmt" + "strings" + + flowpb "github.com/cilium/cilium/api/v1/flow" +) + +func ParseFlowFilters(args ...string) ([]*flowpb.FlowFilter, error) { + filters := make([]*flowpb.FlowFilter, 0, len(args)) + for _, enc := range args { + dec := json.NewDecoder(strings.NewReader(enc)) + var filter flowpb.FlowFilter + if err := dec.Decode(&filter); err != nil { + return nil, fmt.Errorf("failed to decode flow filter '%v': %w", enc, err) + } + filters = append(filters, &filter) + } + return filters, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/identity/identity.go b/vendor/github.com/cilium/cilium/pkg/identity/identity.go index 7e96d93cc1..d55de2cac2 100644 --- a/vendor/github.com/cilium/cilium/pkg/identity/identity.go +++ b/vendor/github.com/cilium/cilium/pkg/identity/identity.go @@ -8,6 +8,7 @@ import ( "fmt" "net" "strconv" + "sync" "github.com/cilium/cilium/pkg/labels" "github.com/cilium/cilium/pkg/option" @@ -322,3 +323,8 @@ func IdentityAllocationIsLocal(lbls labels.Labels) bool { // key, the well-known identity for it can be allocated locally. 
return LookupReservedIdentityByLabels(lbls) != nil } + +// UpdateIdentities is an interface to be called when identities change +type UpdateIdentities interface { + UpdateIdentities(added, deleted IdentityMap, wg *sync.WaitGroup) +} diff --git a/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/cell.go b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/cell.go index 5b1fe39d78..bb8e812797 100644 --- a/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/cell.go +++ b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/cell.go @@ -8,5 +8,5 @@ import "github.com/cilium/hive/cell" var Cell = cell.Module( "identity-manager", "Identity manager tracks identities assigned to locally managed endpoints ", - cell.Provide(NewIdentityManager), + cell.Provide(NewIDManager), ) diff --git a/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/manager.go b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/manager.go index 2f36277e64..03813c60c3 100644 --- a/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/manager.go +++ b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/manager.go @@ -13,6 +13,15 @@ import ( "github.com/cilium/cilium/pkg/logging/logfields" ) +type IDManager interface { + Add(identity *identity.Identity) + GetIdentityModels() []*models.IdentityEndpoints + Remove(identity *identity.Identity) + RemoveAll() + RemoveOldAddNew(old *identity.Identity, new *identity.Identity) + Subscribe(o Observer) +} + // IdentityManager caches information about a set of identities, currently a // reference count of how many users there are for each identity. type IdentityManager struct { @@ -21,13 +30,17 @@ type IdentityManager struct { observers map[Observer]struct{} } +// NewIDManager returns an initialized IdentityManager. +func NewIDManager() IDManager { + return newIdentityManager() +} + type identityMetadata struct { identity *identity.Identity refCount uint } -// NewIdentityManager returns an initialized IdentityManager. -func NewIdentityManager() *IdentityManager { +func newIdentityManager() *IdentityManager { return &IdentityManager{ identities: make(map[identity.NumericIdentity]*identityMetadata), observers: make(map[Observer]struct{}), diff --git a/vendor/github.com/cilium/cilium/pkg/inctimer/inctimer.go b/vendor/github.com/cilium/cilium/pkg/inctimer/inctimer.go deleted file mode 100644 index 072c02a56e..0000000000 --- a/vendor/github.com/cilium/cilium/pkg/inctimer/inctimer.go +++ /dev/null @@ -1,80 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package inctimer - -import "time" - -// IncTimer should be the preferred mechanism over -// calling `time.After` when wanting an `After`-like -// function in a loop. This prevents memory build up -// as the `time.After` method creates a new timer -// instance every time it is called, and it is not -// garbage collected until after it fires. Conversely, -// IncTimer only uses one timer and correctly stops -// the timer, clears its channel, and resets it -// everytime that `After` is called. -type IncTimer interface { - After(time.Duration) <-chan time.Time -} - -type incTimer struct { - t *time.Timer -} - -// New creates a new IncTimer and a done function. -// IncTimer only uses one timer and correctly stops -// the timer, clears the channel, and resets it every -// time the `After` function is called. -// WARNING: Concurrent use is not expected. The use -// of this timer should be for only one goroutine. 
-func New() (IncTimer, func() bool) { - it := &incTimer{} - return it, it.stop -} - -// stop returns true if a scheduled timer has been stopped before execution. -func (it *incTimer) stop() bool { - if it.t == nil { - return false - } - return it.t.Stop() -} - -// After returns a channel that will fire after -// the specified duration. -func (it *incTimer) After(d time.Duration) <-chan time.Time { - // Stop the previous timer (if any) to garbage collect it. - // The old timer channel will be garbage collected even if not drained. - it.stop() - - // We have to create a new timer for each invocation, because it is not - // possible to safely use https://golang.org/pkg/time/#Timer.Reset if we - // do not know if the timer channel has already been drained or not (which - // is the case here, as the client might have drained the channel already). - // Even after stopping a timer, it's not safe to attempt to drain its - // timer channel with a default case (for the case where the client has - // drained the channel already), as there is a small window where a timer - // is considered expired, but the channel has not received a value yet [1]. - // This would cause us to erroneously take the default case (assuming the - // channel has been drained by the client), when in fact the channel just - // has not received a value yet. Because the two cases (client has drained - // vs. value not received yet) are indistinguishable for us, we cannot use - // Timer.Reset and need to create a new timer. - // - // [1] The reason why this small window occurs, is because the Go runtime - // will remove a timer from the heap and and mark it as deleted _before_ - // it actually executes the timer function f: - // https://github.com/golang/go/blob/go1.16/src/runtime/time.go#L876 - // This causes t.Stop to report the timer as already expired while it is - // in fact currently running: - // https://github.com/golang/go/blob/go1.16/src/runtime/time.go#L352 - it.t = time.NewTimer(d) - return it.t.C -} - -// After wraps the time.After function to get around the /timeafter linter -// warning for cases where it is inconvenient to use the instantiated version. -func After(d time.Duration) <-chan time.Time { - return time.After(d) -} diff --git a/vendor/github.com/cilium/cilium/pkg/ip/ip.go b/vendor/github.com/cilium/cilium/pkg/ip/ip.go index 5563ae7f3e..7f457f340c 100644 --- a/vendor/github.com/cilium/cilium/pkg/ip/ip.go +++ b/vendor/github.com/cilium/cilium/pkg/ip/ip.go @@ -9,11 +9,10 @@ import ( "math/big" "net" "net/netip" + "slices" "sort" "go4.org/netipx" - - "github.com/cilium/cilium/pkg/slices" ) const ( @@ -753,15 +752,8 @@ func PartitionCIDR(targetCIDR net.IPNet, excludeCIDR net.IPNet) ([]*net.IPNet, [ // netip.Addr.Compare (i.e. IPv4 addresses show up before IPv6). // The slice is manipulated in-place destructively; it does not create a new slice. func KeepUniqueAddrs(addrs []netip.Addr) []netip.Addr { - return slices.SortedUniqueFunc( - addrs, - func(a, b netip.Addr) int { - return a.Compare(b) - }, - func(a, b netip.Addr) bool { - return a == b - }, - ) + SortAddrList(addrs) + return slices.Compact(addrs) } var privateIPBlocks []*net.IPNet @@ -847,15 +839,11 @@ func ListContainsIP(ipList []net.IP, ip net.IP) bool { // SortIPList sorts the provided net.IP slice in place. 
func SortIPList(ipList []net.IP) { - sort.Slice(ipList, func(i, j int) bool { - return bytes.Compare(ipList[i], ipList[j]) < 0 - }) + slices.SortFunc(ipList, func(a, b net.IP) int { return bytes.Compare(a, b) }) } func SortAddrList(ipList []netip.Addr) { - sort.Slice(ipList, func(i, j int) bool { - return ipList[i].Compare(ipList[j]) < 0 - }) + slices.SortFunc(ipList, netip.Addr.Compare) } // getSortedIPList returns a new net.IP slice in which the IPs are sorted. diff --git a/vendor/github.com/cilium/cilium/pkg/ipam/types/types.go b/vendor/github.com/cilium/cilium/pkg/ipam/types/types.go index 81c6e5ebcd..5f938642ee 100644 --- a/vendor/github.com/cilium/cilium/pkg/ipam/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/ipam/types/types.go @@ -178,6 +178,13 @@ type IPAMSpec struct { // // +kubebuilder:validation:Minimum=0 MaxAboveWatermark int `json:"max-above-watermark,omitempty"` + + // StaticIPTags are used to determine the pool of IPs from which to + // attribute a static IP to the node. For example in AWS this is used to + // filter Elastic IP Addresses. + // + // +optional + StaticIPTags map[string]string `json:"static-ip-tags,omitempty"` } // IPReleaseStatus defines the valid states in IP release handshake @@ -230,6 +237,11 @@ type IPAMStatus struct { // // +optional ReleaseIPv6s map[string]IPReleaseStatus `json:"release-ipv6s,omitempty"` + + // AssignedStaticIP is the static IP assigned to the node (ex: public Elastic IP address in AWS) + // + // +optional + AssignedStaticIP string `json:"assigned-static-ip,omitempty"` } // IPAMPoolRequest is a request from the agent to the operator, indicating how diff --git a/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepcopy.go index fb9303d76c..b0af2cd053 100644 --- a/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepcopy.go @@ -151,6 +151,13 @@ func (in *IPAMSpec) DeepCopyInto(out *IPAMSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.StaticIPTags != nil { + in, out := &in.StaticIPTags, &out.StaticIPTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } diff --git a/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepequal.go index cc2d82b011..d0065ee549 100644 --- a/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepequal.go @@ -211,6 +211,26 @@ func (in *IPAMSpec) DeepEqual(other *IPAMSpec) bool { if in.MaxAboveWatermark != other.MaxAboveWatermark { return false } + if ((in.StaticIPTags != nil) && (other.StaticIPTags != nil)) || ((in.StaticIPTags == nil) != (other.StaticIPTags == nil)) { + in, other := &in.StaticIPTags, &other.StaticIPTags + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if inValue != otherValue { + return false + } + } + } + } + } return true } @@ -289,6 +309,10 @@ func (in *IPAMStatus) DeepEqual(other *IPAMStatus) bool { } } + if in.AssignedStaticIP != other.AssignedStaticIP { + return false + } + return true } diff --git a/vendor/github.com/cilium/cilium/pkg/ipcache/types/types.go 
b/vendor/github.com/cilium/cilium/pkg/ipcache/types/types.go index 34f3ce6b8d..ee19ffb904 100644 --- a/vendor/github.com/cilium/cilium/pkg/ipcache/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/ipcache/types/types.go @@ -63,6 +63,14 @@ func NewResourceID(kind ResourceKind, namespace, name string) ResourceID { return ResourceID(str.String()) } +func (r ResourceID) Namespace() string { + parts := strings.SplitN(string(r), "/", 3) + if len(parts) < 2 { + return "" + } + return parts[1] +} + // TunnelPeer is the IP address of the host associated with this prefix. This is // typically used to establish a tunnel, e.g. in tunnel mode or for encryption. // This type implements ipcache.IPMetadata diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go index f76456ac38..b65d3a29ef 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go @@ -15,5 +15,5 @@ const ( // // Maintainers: Run ./Documentation/check-crd-compat-table.sh for each release // Developers: Bump patch for each change in the CRD schema. - CustomResourceDefinitionSchemaVersion = "1.30.2" + CustomResourceDefinitionSchemaVersion = "1.30.4" ) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils/utils.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils/utils.go index a0ea8b57a4..b88ddedad5 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils/utils.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils/utils.go @@ -203,7 +203,9 @@ func parseToCiliumEgressCommonRule(namespace string, es api.EndpointSelector, eg if egr.ToEndpoints != nil { retRule.ToEndpoints = make([]api.EndpointSelector, len(egr.ToEndpoints)) for j, ep := range egr.ToEndpoints { - retRule.ToEndpoints[j] = getEndpointSelector(namespace, ep.LabelSelector, true, matchesInit) + endpointSelector := getEndpointSelector(namespace, ep.LabelSelector, true, matchesInit) + endpointSelector.Generated = ep.Generated + retRule.ToEndpoints[j] = endpointSelector } } @@ -326,7 +328,9 @@ func ParseToCiliumRule(namespace, name string, uid types.UID, r *api.Rule) *api. retRule.EndpointSelector = api.NewESFromK8sLabelSelector("", r.EndpointSelector.LabelSelector) // The PodSelector should only reflect to the same namespace // the policy is being stored, thus we add the namespace to - // the MatchLabels map. + // the MatchLabels map. Additionally, Policy repository relies + // on this fact to properly choose correct network policies for + // a given Security Identity. // // Policies applying to all namespaces are a special case. 
// Such policies can match on any traffic from Pods or Nodes, diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go index 7e0641258b..d07a0a8a61 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go @@ -24,7 +24,6 @@ import ( // +kubebuilder:printcolumn:JSONPath=".status.identity.id",description="Security Identity",name="Security Identity",type=integer // +kubebuilder:printcolumn:JSONPath=".status.policy.ingress.state",description="Ingress enforcement in the endpoint",name="Ingress Enforcement",type=string,priority=1 // +kubebuilder:printcolumn:JSONPath=".status.policy.egress.state",description="Egress enforcement in the endpoint",name="Egress Enforcement",type=string,priority=1 -// +kubebuilder:printcolumn:JSONPath=".status.visibility-policy-status",description="Status of visibility policy in the endpoint",name="Visibility Policy",type=string,priority=1 // +kubebuilder:printcolumn:JSONPath=".status.state",description="Endpoint current state",name="Endpoint State",type=string // +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv4",description="Endpoint IPv4 address",name="IPv4",type=string // +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv6",description="Endpoint IPv6 address",name="IPv6",type=string @@ -77,8 +76,6 @@ type EndpointStatus struct { Policy *EndpointPolicy `json:"policy,omitempty"` - VisibilityPolicyStatus *string `json:"visibility-policy-status,omitempty"` - // State is the state of the endpoint. // // +kubebuilder:validation:Enum=creating;waiting-for-identity;not-ready;waiting-to-regenerate;regenerating;restoring;ready;disconnecting;disconnected;invalid diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepcopy.go index 029eeacecf..dcfda03400 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepcopy.go @@ -1203,11 +1203,6 @@ func (in *EndpointStatus) DeepCopyInto(out *EndpointStatus) { *out = new(EndpointPolicy) (*in).DeepCopyInto(*out) } - if in.VisibilityPolicyStatus != nil { - in, out := &in.VisibilityPolicyStatus, &out.VisibilityPolicyStatus - *out = new(string) - **out = **in - } if in.NamedPorts != nil { in, out := &in.NamedPorts, &out.NamedPorts *out = make(models.NamedPorts, len(*in)) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepequal.go index c0727c305e..7a8c9f35ea 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepequal.go @@ -921,14 +921,6 @@ func (in *EndpointStatus) DeepEqual(other *EndpointStatus) bool { } } - if (in.VisibilityPolicyStatus == nil) != (other.VisibilityPolicyStatus == nil) { - return false - } else if in.VisibilityPolicyStatus != nil { - if *in.VisibilityPolicyStatus != *other.VisibilityPolicyStatus { - return false - } - } - if in.State != other.State { return false } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_cluster_types.go 
b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_cluster_types.go index dad7bb87f2..a0a4dae4fb 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_cluster_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_cluster_types.go @@ -14,6 +14,7 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgpclusterconfig",path="ciliumbgpclusterconfigs",scope="Cluster",shortName={cbgpcluster} // +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date +// +kubebuilder:subresource:status // +kubebuilder:storageversion // CiliumBGPClusterConfig is the Schema for the CiliumBGPClusterConfig API @@ -25,6 +26,11 @@ type CiliumBGPClusterConfig struct { // Spec defines the desired cluster configuration of the BGP control plane. Spec CiliumBGPClusterConfigSpec `json:"spec"` + + // Status is a running status of the cluster configuration + // + // +kubebuilder:validation:Optional + Status CiliumBGPClusterConfigStatus `json:"status"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -136,3 +142,23 @@ type PeerConfigReference struct { // +kubebuilder:validation:Required Name string `json:"name"` } + +type CiliumBGPClusterConfigStatus struct { + // The current conditions of the CiliumBGPClusterConfig + // + // +optional + // +listType=map + // +listMapKey=type + // +deepequal-gen=false + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// Conditions for CiliumBGPClusterConfig +const ( + // Node selector selects nothing + BGPClusterConfigConditionNoMatchingNode = "cilium.io/NoMatchingNode" + // Referenced peer configs are missing + BGPClusterConfigConditionMissingPeerConfigs = "cilium.io/MissingPeerConfigs" + // ClusterConfig with conflicting nodeSelector present + BGPClusterConfigConditionConflictingClusterConfigs = "cilium.io/ConflictingClusterConfig" +) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_peer_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_peer_types.go index dc4cdb0336..f0e323bb2e 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_peer_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_peer_types.go @@ -28,6 +28,7 @@ type CiliumBGPPeerConfigList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgppeerconfig",path="ciliumbgppeerconfigs",scope="Cluster",shortName={cbgppeer} // +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date +// +kubebuilder:subresource:status // +kubebuilder:storageversion type CiliumBGPPeerConfig struct { @@ -38,6 +39,11 @@ type CiliumBGPPeerConfig struct { // Spec is the specification of the desired behavior of the CiliumBGPPeerConfig. 
Spec CiliumBGPPeerConfigSpec `json:"spec"` + + // Status is the running status of the CiliumBGPPeerConfig + // + // +kubebuilder:validation:Optional + Status CiliumBGPPeerConfigStatus `json:"status"` } type CiliumBGPPeerConfigSpec struct { @@ -92,6 +98,22 @@ type CiliumBGPPeerConfigSpec struct { Families []CiliumBGPFamilyWithAdverts `json:"families,omitempty"` } +type CiliumBGPPeerConfigStatus struct { + // The current conditions of the CiliumBGPPeerConfig + // + // +optional + // +listType=map + // +listMapKey=type + // +deepequal-gen=false + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// Conditions for CiliumBGPPeerConfig +const ( + // Referenced auth secret is missing + BGPPeerConfigConditionMissingAuthSecret = "cilium.io/MissingAuthSecret" +) + // CiliumBGPFamily represents a AFI/SAFI address family pair. type CiliumBGPFamily struct { // Afi is the Address Family Identifier (AFI) of the family. diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go index 0b0bf728de..ad712afe26 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go @@ -241,6 +241,7 @@ func (in *CiliumBGPClusterConfig) DeepCopyInto(out *CiliumBGPClusterConfig) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) return } @@ -323,6 +324,29 @@ func (in *CiliumBGPClusterConfigSpec) DeepCopy() *CiliumBGPClusterConfigSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPClusterConfigStatus) DeepCopyInto(out *CiliumBGPClusterConfigStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfigStatus. +func (in *CiliumBGPClusterConfigStatus) DeepCopy() *CiliumBGPClusterConfigStatus { + if in == nil { + return nil + } + out := new(CiliumBGPClusterConfigStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CiliumBGPFamily) DeepCopyInto(out *CiliumBGPFamily) { *out = *in @@ -935,6 +959,7 @@ func (in *CiliumBGPPeerConfig) DeepCopyInto(out *CiliumBGPPeerConfig) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) return } @@ -1037,6 +1062,29 @@ func (in *CiliumBGPPeerConfigSpec) DeepCopy() *CiliumBGPPeerConfigSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPPeerConfigStatus) DeepCopyInto(out *CiliumBGPPeerConfigStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfigStatus. 
+func (in *CiliumBGPPeerConfigStatus) DeepCopy() *CiliumBGPPeerConfigStatus { + if in == nil { + return nil + } + out := new(CiliumBGPPeerConfigStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CiliumBGPPeeringPolicy) DeepCopyInto(out *CiliumBGPPeeringPolicy) { *out = *in diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go index 2bb8aced3e..cca99d9aea 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go @@ -243,6 +243,10 @@ func (in *CiliumBGPClusterConfig) DeepEqual(other *CiliumBGPClusterConfig) bool return false } + if !in.Status.DeepEqual(&other.Status) { + return false + } + return true } @@ -281,6 +285,16 @@ func (in *CiliumBGPClusterConfigSpec) DeepEqual(other *CiliumBGPClusterConfigSpe return true } +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPClusterConfigStatus) DeepEqual(other *CiliumBGPClusterConfigStatus) bool { + if other == nil { + return false + } + + return true +} + // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. func (in *CiliumBGPFamily) DeepEqual(other *CiliumBGPFamily) bool { @@ -960,6 +974,10 @@ func (in *CiliumBGPPeerConfig) DeepEqual(other *CiliumBGPPeerConfig) bool { return false } + if !in.Status.DeepEqual(&other.Status) { + return false + } + return true } @@ -1030,6 +1048,16 @@ func (in *CiliumBGPPeerConfigSpec) DeepEqual(other *CiliumBGPPeerConfigSpec) boo return true } +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPPeerConfigStatus) DeepEqual(other *CiliumBGPPeerConfigStatus) bool { + if other == nil { + return false + } + + return true +} + // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. 
func (in *CiliumBGPPeeringPolicy) DeepEqual(other *CiliumBGPPeeringPolicy) bool { diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/cell.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/cell.go index f0b6bc4360..629a182c1d 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/cell.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/cell.go @@ -14,19 +14,25 @@ import ( "strings" "time" + "github.com/cilium/hive" "github.com/cilium/hive/cell" + "github.com/cilium/hive/script" "github.com/sirupsen/logrus" apiext_clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apiext_fake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" k8sErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilnet "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + versionapi "k8s.io/apimachinery/pkg/version" "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/rest" + k8sTesting "k8s.io/client-go/testing" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/util/connrotation" mcsapi_clientset "sigs.k8s.io/mcs-api/pkg/client/clientset/versioned" @@ -42,6 +48,7 @@ import ( slim_metav1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1" slim_clientset "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned" slim_fake "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake" + "github.com/cilium/cilium/pkg/k8s/testutils" k8sversion "github.com/cilium/cilium/pkg/k8s/version" "github.com/cilium/cilium/pkg/logging/logfields" "github.com/cilium/cilium/pkg/version" @@ -456,7 +463,17 @@ func isConnReady(c kubernetes.Interface) error { return err } -var FakeClientCell = cell.Provide(NewFakeClientset) +var FakeClientCell = cell.Module( + "k8s-fake-client", + "Fake Kubernetes client", + + cell.Provide( + NewFakeClientset, + func(fc *FakeClientset) hive.ScriptCmdOut { + return hive.NewScriptCmd("k8s", FakeClientCommand(fc)) + }, + ), +) type ( MCSAPIFakeClientset = mcsapi_fake.Clientset @@ -477,6 +494,8 @@ type FakeClientset struct { SlimFakeClientset *SlimFakeClientset + trackers map[string]k8sTesting.ObjectTracker + enabled bool } @@ -509,6 +528,19 @@ func (c *FakeClientset) RestConfig() *rest.Config { } func NewFakeClientset() (*FakeClientset, Clientset) { + version := testutils.DefaultVersion + return NewFakeClientsetWithVersion(version) +} + +func NewFakeClientsetWithVersion(version string) (*FakeClientset, Clientset) { + if version == "" { + version = testutils.DefaultVersion + } + resources, found := testutils.APIResources[version] + if !found { + panic("version " + version + " not found from testutils.APIResources") + } + client := FakeClientset{ SlimFakeClientset: slim_fake.NewSimpleClientset(), CiliumFakeClientset: cilium_fake.NewSimpleClientset(), @@ -517,10 +549,30 @@ func NewFakeClientset() (*FakeClientset, Clientset) { KubernetesFakeClientset: fake.NewSimpleClientset(), enabled: true, } + client.KubernetesFakeClientset.Resources = resources + client.SlimFakeClientset.Resources = resources + client.CiliumFakeClientset.Resources = resources + client.APIExtFakeClientset.Resources = resources + client.trackers = map[string]k8sTesting.ObjectTracker{ + "slim": client.SlimFakeClientset.Tracker(), + "cilium": client.CiliumFakeClientset.Tracker(), + "mcs": 
client.MCSAPIFakeClientset.Tracker(), + "kubernetes": client.KubernetesFakeClientset.Tracker(), + "apiexit": client.APIExtFakeClientset.Tracker(), + } + + fd := client.KubernetesFakeClientset.Discovery().(*fakediscovery.FakeDiscovery) + fd.FakedServerVersion = toVersionInfo(version) + client.clientsetGetters = clientsetGetters{&client} return &client, &client } +func toVersionInfo(rawVersion string) *versionapi.Info { + parts := strings.Split(rawVersion, ".") + return &versionapi.Info{Major: parts[0], Minor: parts[1]} +} + type ClientBuilderFunc func(name string) (Clientset, error) // NewClientBuilder returns a function that creates a new Clientset with the given @@ -545,6 +597,79 @@ func FakeClientBuilder() ClientBuilderFunc { } } +func FakeClientCommand(fc *FakeClientset) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "interact with fake k8s client", + Args: " args...", + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + if len(args) < 1 { + return nil, fmt.Errorf("usage: k8s files...\n is one of add, update or delete.") + } + + action := args[0] + if len(args) < 2 { + return nil, fmt.Errorf("usage: k8s %s files...", action) + } + + for _, file := range args[1:] { + b, err := os.ReadFile(s.Path(file)) + if err != nil { + // Try relative to current directory, e.g. to allow reading "testdata/foo.yaml" + b, err = os.ReadFile(file) + } + if err != nil { + return nil, fmt.Errorf("failed to read %s: %w", file, err) + } + obj, gvk, err := testutils.DecodeObjectGVK(b) + if err != nil { + return nil, fmt.Errorf("decode: %w", err) + } + gvr, _ := meta.UnsafeGuessKindToResource(*gvk) + objMeta, err := meta.Accessor(obj) + if err != nil { + return nil, fmt.Errorf("accessor: %w", err) + } + name := objMeta.GetName() + ns := objMeta.GetNamespace() + + // Try to add the object to all the trackers. If one of them + // accepts we're good. We'll add to all since multiple trackers + // may accept (e.g. slim and kubernetes). + + // err will get set to nil if any of the tracker methods succeed. + // start with a non-nil default error. + err = fmt.Errorf("none of the trackers of FakeClientset accepted %T", obj) + for trackerName, tracker := range fc.trackers { + var trackerErr error + switch action { + case "add": + trackerErr = tracker.Add(obj) + case "update": + trackerErr = tracker.Update(gvr, obj, ns) + case "delete": + trackerErr = tracker.Delete(gvr, ns, name) + default: + return nil, fmt.Errorf("unknown k8s action %q, expected 'add', 'update' or 'delete'", action) + } + if err != nil { + if trackerErr == nil { + // One of the trackers accepted the object, it's a success! + err = nil + } else { + err = errors.Join(err, fmt.Errorf("%s: %w", trackerName, trackerErr)) + } + } + } + if err != nil { + return nil, err + } + } + return nil, nil + }) +} + func init() { // Register the metav1.Table and metav1.PartialObjectMetadata for the // apiextclientset. 
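Editor's note: NewFakeClientsetWithVersion seeds the fake discovery client with the API resource lists from pkg/k8s/testutils and fakes the reported server version, while the new "k8s" script command feeds YAML manifests into the underlying object trackers. A rough test sketch of the Go side, assuming only what the diff above shows; the test package and assertions are illustrative:

package client_test // hypothetical test package

import (
	"testing"

	"github.com/cilium/cilium/pkg/k8s/client"
)

func TestFakeClientVersion(t *testing.T) {
	// Build a fake clientset that pretends to talk to a Kubernetes 1.24 apiserver.
	fc, _ := client.NewFakeClientsetWithVersion("1.24")

	// The faked discovery endpoint reports the requested version.
	v, err := fc.KubernetesFakeClientset.Discovery().ServerVersion()
	if err != nil {
		t.Fatal(err)
	}
	if v.Major != "1" || v.Minor != "24" {
		t.Fatalf("unexpected server version %s.%s", v.Major, v.Minor)
	}
}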
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpclusterconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpclusterconfig.go index 95fe53344f..5a4adb3c92 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpclusterconfig.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpclusterconfig.go @@ -26,6 +26,8 @@ type CiliumBGPClusterConfigsGetter interface { type CiliumBGPClusterConfigInterface interface { Create(ctx context.Context, ciliumBGPClusterConfig *v2alpha1.CiliumBGPClusterConfig, opts v1.CreateOptions) (*v2alpha1.CiliumBGPClusterConfig, error) Update(ctx context.Context, ciliumBGPClusterConfig *v2alpha1.CiliumBGPClusterConfig, opts v1.UpdateOptions) (*v2alpha1.CiliumBGPClusterConfig, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, ciliumBGPClusterConfig *v2alpha1.CiliumBGPClusterConfig, opts v1.UpdateOptions) (*v2alpha1.CiliumBGPClusterConfig, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumBGPClusterConfig, error) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeerconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeerconfig.go index 8b5c9f3ce9..dace3626c7 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeerconfig.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeerconfig.go @@ -26,6 +26,8 @@ type CiliumBGPPeerConfigsGetter interface { type CiliumBGPPeerConfigInterface interface { Create(ctx context.Context, ciliumBGPPeerConfig *v2alpha1.CiliumBGPPeerConfig, opts v1.CreateOptions) (*v2alpha1.CiliumBGPPeerConfig, error) Update(ctx context.Context, ciliumBGPPeerConfig *v2alpha1.CiliumBGPPeerConfig, opts v1.UpdateOptions) (*v2alpha1.CiliumBGPPeerConfig, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, ciliumBGPPeerConfig *v2alpha1.CiliumBGPPeerConfig, opts v1.UpdateOptions) (*v2alpha1.CiliumBGPPeerConfig, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumBGPPeerConfig, error) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpclusterconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpclusterconfig.go index c45f25cf1c..a9951b2477 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpclusterconfig.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgpclusterconfig.go @@ -86,6 +86,18 @@ func (c *FakeCiliumBGPClusterConfigs) Update(ctx context.Context, ciliumBGPClust return obj.(*v2alpha1.CiliumBGPClusterConfig), err } +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCiliumBGPClusterConfigs) UpdateStatus(ctx context.Context, ciliumBGPClusterConfig *v2alpha1.CiliumBGPClusterConfig, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPClusterConfig, err error) { + emptyResult := &v2alpha1.CiliumBGPClusterConfig{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(ciliumbgpclusterconfigsResource, "status", ciliumBGPClusterConfig, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v2alpha1.CiliumBGPClusterConfig), err +} + // Delete takes name of the ciliumBGPClusterConfig and deletes it. Returns an error if one occurs. func (c *FakeCiliumBGPClusterConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgppeerconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgppeerconfig.go index 650a69554f..be7b6693b7 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgppeerconfig.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgppeerconfig.go @@ -86,6 +86,18 @@ func (c *FakeCiliumBGPPeerConfigs) Update(ctx context.Context, ciliumBGPPeerConf return obj.(*v2alpha1.CiliumBGPPeerConfig), err } +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCiliumBGPPeerConfigs) UpdateStatus(ctx context.Context, ciliumBGPPeerConfig *v2alpha1.CiliumBGPPeerConfig, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPPeerConfig, err error) { + emptyResult := &v2alpha1.CiliumBGPPeerConfig{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(ciliumbgppeerconfigsResource, "status", ciliumBGPPeerConfig, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v2alpha1.CiliumBGPPeerConfig), err +} + // Delete takes name of the ciliumBGPPeerConfig and deletes it. Returns an error if one occurs. 
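Editor's note: with the generated UpdateStatus methods above, status writes for the BGP config CRDs go through the status subresource. A hedged sketch of how a caller might persist an already-mutated status; the function name is illustrative and error handling is trimmed:

package example // hypothetical

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
	"github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
)

// updatePeerConfigStatus writes back the Status of a CiliumBGPPeerConfig via
// the status subresource added in this change.
func updatePeerConfigStatus(ctx context.Context, cs versioned.Interface, cfg *v2alpha1.CiliumBGPPeerConfig) error {
	_, err := cs.CiliumV2alpha1().CiliumBGPPeerConfigs().UpdateStatus(ctx, cfg, metav1.UpdateOptions{})
	return err
}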
func (c *FakeCiliumBGPPeerConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/config.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/config.go index a2ecbfae8a..35b79d7742 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/config.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/config.go @@ -40,7 +40,7 @@ type SharedConfig struct { // K8sHeartbeatTimeout configures the timeout for apiserver heartbeat K8sHeartbeatTimeout time.Duration - // K8sEnableAPIDiscovery enables Kubernetes API discovery + // EnableAPIDiscovery enables Kubernetes API discovery EnableK8sAPIDiscovery bool } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/endpoints.go b/vendor/github.com/cilium/cilium/pkg/k8s/endpoints.go index 2fecf24e35..29e612729f 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/endpoints.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/endpoints.go @@ -450,8 +450,8 @@ type EndpointSlices struct { epSlices map[string]*Endpoints } -// newEndpointsSlices returns a new EndpointSlices -func newEndpointsSlices() *EndpointSlices { +// NewEndpointsSlices returns a new EndpointSlices +func NewEndpointsSlices() *EndpointSlices { return &EndpointSlices{ epSlices: map[string]*Endpoints{}, } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/portforward.go b/vendor/github.com/cilium/cilium/pkg/k8s/portforward.go new file mode 100644 index 0000000000..6e2e672278 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/portforward.go @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package k8s + +import ( + "context" + "fmt" + "io" + "net/http" + "sort" + "strings" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/portforward" + "k8s.io/client-go/transport/spdy" + kutil "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/podutils" + + "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels" +) + +// ForwardedPort holds the local and remote mapped ports. +type ForwardedPort struct { + Local uint16 + Remote uint16 +} + +// PortForwardParameters are the needed parameters to call PortForward. +// +// Ports value follow the kubectl syntax: :: +// - 5000 means 5000:5000 listening on 5000 port locally, forwarding to 5000 in the pod +// - 8888:5000 means listening on 8888 port locally, forwarding to 5000 in the pod +// - 0:5000 means listening on a random port locally, forwarding to 5000 in the pod +// - :5000 means listening on a random port locally, forwarding to 5000 in the pod +type PortForwardParameters struct { + Namespace string + Pod string + Ports []string + Addresses []string + OutWriters OutWriters +} + +// OutWriters holds the two io.Writer used by the port forward. +// These can be safely disabled by setting them to nil. +type OutWriters struct { + Out io.Writer + ErrOut io.Writer +} + +// PortForwarder augments the k8s client-go PortForwarder with helper methods using a clientset. +type PortForwarder struct { + clientset kubernetes.Interface + config *rest.Config +} + +// NewPortForwarder creates a new PortForwarder ready to use. +func NewPortForwarder(clientset kubernetes.Interface, config *rest.Config) *PortForwarder { + return &PortForwarder{clientset: clientset, config: config} +} + +// PortForwardResult are the ports that have been forwarded by PortForward. 
+type PortForwardResult struct { + ForwardedPorts []ForwardedPort +} + +// PortForward executes in a goroutine a port forward command. +// To stop the port-forwarding, use the context by cancelling it. +func (pf *PortForwarder) PortForward(ctx context.Context, p PortForwardParameters) (*PortForwardResult, error) { + req := pf.clientset.CoreV1().RESTClient().Post().Namespace(p.Namespace). + Resource("pods").Name(p.Pod).SubResource(strings.ToLower("PortForward")) + + roundTripper, upgrader, err := spdy.RoundTripperFor(pf.config) + if err != nil { + return nil, err + } + + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: roundTripper}, http.MethodPost, req.URL()) + stopChan, readyChan := make(chan struct{}, 1), make(chan struct{}, 1) + if len(p.Addresses) == 0 { + p.Addresses = []string{"localhost"} + } + + pw, err := portforward.NewOnAddresses(dialer, p.Addresses, p.Ports, stopChan, readyChan, p.OutWriters.Out, p.OutWriters.ErrOut) + if err != nil { + return nil, err + } + + errChan := make(chan error, 1) + go func() { + if err := pw.ForwardPorts(); err != nil { + errChan <- err + } + }() + + go func() { + <-ctx.Done() + close(stopChan) + }() + + select { + case <-pw.Ready: + case <-ctx.Done(): + return nil, ctx.Err() + case err := <-errChan: + return nil, err + } + + ports, err := pw.GetPorts() + if err != nil { + return nil, err + } + + forwardedPorts := make([]ForwardedPort, 0, len(ports)) + for _, port := range ports { + forwardedPorts = append(forwardedPorts, ForwardedPort{port.Local, port.Remote}) + } + + return &PortForwardResult{ + ForwardedPorts: forwardedPorts, + }, nil +} + +// PortForwardServiceResult are the ports that have been forwarded by PortForwardService. +type PortForwardServiceResult struct { + ForwardedPort ForwardedPort +} + +// PortForwardService executes in a goroutine a port forward command towards one of the pod behind a +// service. If `localPort` is 0, a random port is selected. If `svcPort` is 0, uses the first port +// configured on the service. +// +// To stop the port-forwarding, use the context by cancelling it. +func (pf *PortForwarder) PortForwardService(ctx context.Context, namespace, name string, localPort, svcPort int32) (*PortForwardServiceResult, error) { + svc, err := pf.clientset.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get service %q: %w", name, err) + } + + pod, err := pf.getFirstPodForService(ctx, svc) + if err != nil { + return nil, fmt.Errorf("failed to get service %q: %w", name, err) + } + + if svcPort == 0 { + svcPort = svc.Spec.Ports[0].Port + } + + containerPort, err := kutil.LookupContainerPortNumberByServicePort(*svc, *pod, svcPort) + if err != nil { + return nil, fmt.Errorf("failed to lookup container port with service port %d: %w", svcPort, err) + } + + p := PortForwardParameters{ + Namespace: pod.Namespace, + Pod: pod.Name, + Ports: []string{fmt.Sprintf("%d:%d", localPort, containerPort)}, + Addresses: nil, // default is localhost + OutWriters: OutWriters{Out: nil, ErrOut: nil}, + } + + res, err := pf.PortForward(ctx, p) + if err != nil { + return nil, fmt.Errorf("failed to port forward: %w", err) + } + + return &PortForwardServiceResult{ + ForwardedPort: res.ForwardedPorts[0], + }, nil +} + +// getFirstPodForService returns the first pod in the list of pods matching the service selector, +// sorted from most to less active (see `podutils.ActivePods` for more details). 
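Editor's note: the new PortForwarder wraps client-go's SPDY port forwarding behind a small API, which is what lets the CLI forward to Hubble Relay without shelling out to kubectl. A minimal usage sketch; the namespace and service name are placeholders, and cancelling ctx stops the forwarding:

package example // hypothetical

import (
	"context"
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"

	"github.com/cilium/cilium/pkg/k8s"
)

// forwardRelay forwards a random local port to a pod behind the given service
// and returns the local port that was chosen.
func forwardRelay(ctx context.Context, cs kubernetes.Interface, cfg *rest.Config) (uint16, error) {
	pf := k8s.NewPortForwarder(cs, cfg)
	// localPort=0 picks a random local port, svcPort=0 uses the first service port.
	res, err := pf.PortForwardService(ctx, "kube-system", "hubble-relay", 0, 0)
	if err != nil {
		return 0, fmt.Errorf("port forward failed: %w", err)
	}
	return res.ForwardedPort.Local, nil
}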
+func (pf *PortForwarder) getFirstPodForService(ctx context.Context, svc *corev1.Service) (*corev1.Pod, error) { + selector := labels.SelectorFromSet(svc.Spec.Selector) + podList, err := pf.clientset.CoreV1().Pods(svc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + return nil, fmt.Errorf("failed to get list of pods for service %q: %w", svc.Name, err) + } + if len(podList.Items) == 0 { + return nil, fmt.Errorf("no pods found for service: %s", svc.Name) + } + if len(podList.Items) == 1 { + return &podList.Items[0], nil + } + + pods := make([]*corev1.Pod, 0, len(podList.Items)) + for _, pod := range podList.Items { + pods = append(pods, &pod) + } + sortBy := func(pods []*corev1.Pod) sort.Interface { return sort.Reverse(podutils.ActivePods(pods)) } + sort.Sort(sortBy(pods)) + + return pods[0], nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/service.go b/vendor/github.com/cilium/cilium/pkg/k8s/service.go index 6e84cff86b..065b48f4bb 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/service.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/service.go @@ -229,7 +229,7 @@ func ParseService(svc *slim_corev1.Service, nodePortAddrs []netip.Addr) (Service if expType.canExpose(slim_corev1.ServiceTypeLoadBalancer) { for _, ip := range svc.Status.LoadBalancer.Ingress { - if ip.IP != "" { + if ip.IP != "" && ip.IPMode == nil || *ip.IPMode == slim_corev1.LoadBalancerIPModeVIP { loadBalancerIPs = append(loadBalancerIPs, ip.IP) } } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/service_cache.go b/vendor/github.com/cilium/cilium/pkg/k8s/service_cache.go index 600a679fab..3c4f10e6b0 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/service_cache.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/service_cache.go @@ -301,7 +301,7 @@ func (s *ServiceCache) GetEndpointsOfService(svcID ServiceID) *Endpoints { // Services are iterated in random order. // The ServiceCache is read-locked during this function call. The passed in // Service and Endpoints references are read-only. -func (s *ServiceCache) ForEachService(yield func(svcID ServiceID, svc *Service, eps *Endpoints) bool) { +func (s *ServiceCache) ForEachService(yield func(svcID ServiceID, svc *Service, eps *EndpointSlices) bool) { s.mutex.RLock() defer s.mutex.RUnlock() @@ -310,8 +310,7 @@ func (s *ServiceCache) ForEachService(yield func(svcID ServiceID, svc *Service, if !ok { continue } - eps := ep.GetEndpoints() - if !yield(svcID, svc, eps) { + if !yield(svcID, svc, ep) { return } } @@ -454,7 +453,7 @@ func (s *ServiceCache) UpdateEndpoints(newEndpoints *Endpoints, swg *lock.Stoppa return esID.ServiceID, newEndpoints } } else { - eps = newEndpointsSlices() + eps = NewEndpointsSlices() s.endpoints[esID.ServiceID] = eps } @@ -628,19 +627,20 @@ func (s *ServiceCache) filterEndpoints(localEndpoints *Endpoints, svc *Service) // // OR Remote endpoints exist which correlate to the service. 
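Editor's note: after the ForEachService signature change above, callers receive the whole EndpointSlices collection for a service instead of a pre-merged Endpoints value, and call GetEndpoints() themselves when they need the merged view. A rough caller sketch; the function name is illustrative:

package example // hypothetical

import "github.com/cilium/cilium/pkg/k8s"

// countBackends iterates the service cache with the new callback signature and
// tallies merged backends per service.
func countBackends(sc *k8s.ServiceCache) map[k8s.ServiceID]int {
	counts := map[k8s.ServiceID]int{}
	sc.ForEachService(func(id k8s.ServiceID, svc *k8s.Service, eps *k8s.EndpointSlices) bool {
		if merged := eps.GetEndpoints(); merged != nil {
			counts[id] = len(merged.Backends)
		}
		return true // keep iterating
	})
	return counts
}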
func (s *ServiceCache) correlateEndpoints(id ServiceID) (*Endpoints, bool) { - endpoints := newEndpoints() - - localEndpoints := s.endpoints[id].GetEndpoints() + endpoints := s.endpoints[id].GetEndpoints() svc, svcFound := s.services[id] - hasLocalEndpoints := localEndpoints != nil + hasLocalEndpoints := endpoints != nil if hasLocalEndpoints { - localEndpoints = s.filterEndpoints(localEndpoints, svc) + endpoints = s.filterEndpoints(endpoints, svc) - for ip, e := range localEndpoints.Backends { + for _, e := range endpoints.Backends { + // The endpoints returned by GetEndpoints are already deep copies, + // hence we can mutate them in-place without problems. e.Preferred = svcFound && svc.IncludeExternal && svc.ServiceAffinity == serviceAffinityLocal - endpoints.Backends[ip] = e.DeepCopy() } + } else { + endpoints = newEndpoints() } var hasExternalEndpoints bool diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/synced/crd.go b/vendor/github.com/cilium/cilium/pkg/k8s/synced/crd.go index 86b23a3054..c4dcbdffae 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/synced/crd.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/synced/crd.go @@ -37,11 +37,8 @@ func CRDResourceName(crd string) string { func agentCRDResourceNames() []string { result := []string{ - CRDResourceName(v2.CNPName), - CRDResourceName(v2.CCNPName), CRDResourceName(v2.CNName), CRDResourceName(v2.CIDName), - CRDResourceName(v2alpha1.CCGName), CRDResourceName(v2alpha1.CPIPName), } @@ -52,6 +49,18 @@ func agentCRDResourceNames() []string { } } + if option.Config.EnableCiliumNetworkPolicy { + result = append(result, CRDResourceName(v2.CNPName)) + } + + if option.Config.EnableCiliumClusterwideNetworkPolicy { + result = append(result, CRDResourceName(v2.CCNPName)) + } + + if option.Config.EnableCiliumNetworkPolicy || option.Config.EnableCiliumClusterwideNetworkPolicy { + result = append(result, CRDResourceName(v2alpha1.CCGName)) + } + if option.Config.EnableIPv4EgressGateway { result = append(result, CRDResourceName(v2.CEGPName)) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/synced/resources.go b/vendor/github.com/cilium/cilium/pkg/k8s/synced/resources.go index 32280dca04..4512d3e163 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/synced/resources.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/synced/resources.go @@ -9,7 +9,6 @@ import ( "golang.org/x/sync/errgroup" "k8s.io/client-go/tools/cache" - "github.com/cilium/cilium/pkg/inctimer" "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/time" ) @@ -185,7 +184,7 @@ func (r *Resources) WaitForCacheSyncWithTimeout(timeout time.Duration, resourceN // If timeout is reached, check if an event occurred that would // have pushed back the timeout and wait for that amount of time. 
select { - case now := <-inctimer.After(currTimeout): + case now := <-time.After(currTimeout): lastEvent, never := r.getTimeOfLastEvent(resource) if never { return fmt.Errorf("timed out after %s, never received event for resource %q", timeout, resource) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/testutils/decoder.go b/vendor/github.com/cilium/cilium/pkg/k8s/testutils/decoder.go index 61ae4c4275..dec8f753cd 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/testutils/decoder.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/testutils/decoder.go @@ -8,6 +8,7 @@ import ( "sync" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" gatewayv1alpha2 "sigs.k8s.io/gateway-api/apis/v1alpha2" @@ -63,6 +64,11 @@ func DecodeObject(bytes []byte) (runtime.Object, error) { return obj, err } +func DecodeObjectGVK(bytes []byte) (runtime.Object, *schema.GroupVersionKind, error) { + obj, gvk, err := Decoder().Decode(bytes, nil, nil) + return obj, gvk, err +} + func DecodeFile(path string) (runtime.Object, error) { bs, err := os.ReadFile(path) if err != nil { diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/testutils/resources.go b/vendor/github.com/cilium/cilium/pkg/k8s/testutils/resources.go new file mode 100644 index 0000000000..2944a47d7a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/testutils/resources.go @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package testutils + +import ( + corev1 "k8s.io/api/core/v1" + discov1 "k8s.io/api/discovery/v1" + discov1beta1 "k8s.io/api/discovery/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" +) + +var ( + DefaultVersion = "1.26" + + // APIResources is the list of API resources for the k8s version that we're mocking. + // This is mostly relevant for the feature detection at pkg/k8s/version/version.go. + // The lists here are currently not exhaustive and expanded on need-by-need basis. 
+ APIResources = map[string][]*metav1.APIResourceList{ + "1.16": { + CoreV1APIResources, + CiliumV2APIResources, + }, + "1.24": { + CoreV1APIResources, + DiscoveryV1APIResources, + DiscoveryV1Beta1APIResources, + CiliumV2APIResources, + }, + "1.25": { + CoreV1APIResources, + DiscoveryV1APIResources, + CiliumV2APIResources, + }, + "1.26": { + CoreV1APIResources, + DiscoveryV1APIResources, + CiliumV2APIResources, + }, + } + + CoreV1APIResources = &metav1.APIResourceList{ + GroupVersion: corev1.SchemeGroupVersion.String(), + APIResources: []metav1.APIResource{ + {Name: "nodes", Kind: "Node"}, + {Name: "pods", Namespaced: true, Kind: "Pod"}, + {Name: "services", Namespaced: true, Kind: "Service"}, + {Name: "endpoints", Namespaced: true, Kind: "Endpoint"}, + }, + } + + CiliumV2APIResources = &metav1.APIResourceList{ + TypeMeta: metav1.TypeMeta{}, + GroupVersion: cilium_v2.SchemeGroupVersion.String(), + APIResources: []metav1.APIResource{ + {Name: cilium_v2.CNPluralName, Kind: cilium_v2.CNKindDefinition}, + {Name: cilium_v2.CEPPluralName, Namespaced: true, Kind: cilium_v2.CEPKindDefinition}, + {Name: cilium_v2.CIDPluralName, Namespaced: true, Kind: cilium_v2.CIDKindDefinition}, + {Name: cilium_v2.CEGPPluralName, Namespaced: true, Kind: cilium_v2.CEGPKindDefinition}, + {Name: cilium_v2.CNPPluralName, Namespaced: true, Kind: cilium_v2.CNPKindDefinition}, + {Name: cilium_v2.CCNPPluralName, Namespaced: true, Kind: cilium_v2.CCNPKindDefinition}, + {Name: cilium_v2.CLRPPluralName, Namespaced: true, Kind: cilium_v2.CLRPKindDefinition}, + {Name: cilium_v2.CEWPluralName, Namespaced: true, Kind: cilium_v2.CEWKindDefinition}, + {Name: cilium_v2.CCECPluralName, Namespaced: true, Kind: cilium_v2.CCECKindDefinition}, + {Name: cilium_v2.CECPluralName, Namespaced: true, Kind: cilium_v2.CECKindDefinition}, + }, + } + + DiscoveryV1APIResources = &metav1.APIResourceList{ + TypeMeta: metav1.TypeMeta{}, + GroupVersion: discov1.SchemeGroupVersion.String(), + APIResources: []metav1.APIResource{ + {Name: "endpointslices", Namespaced: true, Kind: "EndpointSlice"}, + }, + } + + DiscoveryV1Beta1APIResources = &metav1.APIResourceList{ + GroupVersion: discov1beta1.SchemeGroupVersion.String(), + APIResources: []metav1.APIResource{ + {Name: "endpointslices", Namespaced: true, Kind: "EndpointSlice"}, + }, + } +) diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/dummy.go b/vendor/github.com/cilium/cilium/pkg/kvstore/dummy.go index 14d3841ac0..83d6991e76 100644 --- a/vendor/github.com/cilium/cilium/pkg/kvstore/dummy.go +++ b/vendor/github.com/cilium/cilium/pkg/kvstore/dummy.go @@ -9,7 +9,6 @@ import ( client "go.etcd.io/etcd/client/v3" - "github.com/cilium/cilium/pkg/inctimer" "github.com/cilium/cilium/pkg/time" ) @@ -66,9 +65,6 @@ func SetupDummyWithConfigOpts(tb testing.TB, dummyBackend string, opts map[strin tb.Fatalf("Failed waiting for kvstore connection to be established: %v", err) } - timer, done := inctimer.New() - defer done() - // Multiple tests might be running in parallel by go test if they are part of // different packages. 
Let's implement a locking mechanism to ensure that only // one at a time can access the kvstore, to prevent that they interact with @@ -86,7 +82,7 @@ func SetupDummyWithConfigOpts(tb testing.TB, dummyBackend string, opts map[strin } select { - case <-timer.After(100 * time.Millisecond): + case <-time.After(100 * time.Millisecond): case <-ctx.Done(): tb.Fatal("Timed out waiting to acquire the kvstore lock") } diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/etcd.go b/vendor/github.com/cilium/cilium/pkg/kvstore/etcd.go index dc43ad0f39..f865ebb774 100644 --- a/vendor/github.com/cilium/cilium/pkg/kvstore/etcd.go +++ b/vendor/github.com/cilium/cilium/pkg/kvstore/etcd.go @@ -28,7 +28,6 @@ import ( "github.com/cilium/cilium/pkg/backoff" "github.com/cilium/cilium/pkg/defaults" - "github.com/cilium/cilium/pkg/inctimer" "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/logging/logfields" "github.com/cilium/cilium/pkg/option" @@ -1018,9 +1017,6 @@ func (e *etcdClient) statusChecker() { var consecutiveQuorumErrors uint - statusTimer, statusTimerDone := inctimer.New() - defer statusTimerDone() - e.RWMutex.Lock() // Ensure that lastHearbeat is always set to a non-zero value when starting // the status checker, to guarantee that we can correctly compute the time @@ -1106,7 +1102,7 @@ func (e *etcdClient) statusChecker() { case <-e.stopStatusChecker: close(e.statusCheckErrors) return - case <-statusTimer.After(e.extraOptions.StatusCheckInterval(allConnected)): + case <-time.After(e.extraOptions.StatusCheckInterval(allConnected)): } } } diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/lock.go b/vendor/github.com/cilium/cilium/pkg/kvstore/lock.go index c70ee81046..b28c74f083 100644 --- a/vendor/github.com/cilium/cilium/pkg/kvstore/lock.go +++ b/vendor/github.com/cilium/cilium/pkg/kvstore/lock.go @@ -13,7 +13,6 @@ import ( "github.com/cilium/cilium/pkg/debug" "github.com/cilium/cilium/pkg/defaults" - "github.com/cilium/cilium/pkg/inctimer" "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/time" ) @@ -79,8 +78,6 @@ func (pl *pathLocks) runGC() { } func (pl *pathLocks) lock(ctx context.Context, path string) (id uuid.UUID, err error) { - lockTimer, lockTimerDone := inctimer.New() - defer lockTimerDone() for { pl.mutex.Lock() if _, ok := pl.lockPaths[path]; !ok { @@ -95,7 +92,7 @@ func (pl *pathLocks) lock(ctx context.Context, path string) (id uuid.UUID, err e pl.mutex.Unlock() select { - case <-lockTimer.After(time.Duration(10) * time.Millisecond): + case <-time.After(10 * time.Millisecond): case <-ctx.Done(): err = fmt.Errorf("lock was cancelled: %w", ctx.Err()) return diff --git a/vendor/github.com/cilium/cilium/pkg/labels/arraylist.go b/vendor/github.com/cilium/cilium/pkg/labels/arraylist.go index 11e5669ec1..80d1b2ae3b 100644 --- a/vendor/github.com/cilium/cilium/pkg/labels/arraylist.go +++ b/vendor/github.com/cilium/cilium/pkg/labels/arraylist.go @@ -3,7 +3,10 @@ package labels -import "sort" +import ( + "fmt" + "sort" +) // LabelArrayList is an array of LabelArrays. It is primarily intended as a // simple collection @@ -35,16 +38,70 @@ func (ls LabelArrayList) GetModel() [][]string { // Equals returns true if the label arrays lists have the same label arrays in the same order. 
func (ls LabelArrayList) Equals(b LabelArrayList) bool { if len(ls) != len(b) { + fmt.Printf("LEN DIFFERS: obtained %v, expected %v\n", ls, b) return false } for l := range ls { if !ls[l].Equals(b[l]) { + fmt.Printf("LABEL ARRAY %d DIFFERS: obtained %v, expected %v\n", + l, ls[l], b[l]) return false } } return true } +// Diff returns the string of differences between 'ls' and 'expected' LabelArrayList with +// '+ ' or '- ' for obtaining something unexpected, or not obtaining the expected, respectively. +// For use in debugging. Assumes sorted LabelArrayLists. +func (ls LabelArrayList) Diff(expected LabelArrayList) (res string) { + res += "" + i := 0 + j := 0 + for i < len(ls) && j < len(expected) { + if ls[i].Equals(expected[j]) { + i++ + j++ + continue + } + if ls[i].Less(expected[j]) { + // obtained has an unexpected labelArray + res += " + " + ls[i].String() + "\n" + i++ + } + for j < len(expected) && expected[j].Less(ls[i]) { + // expected has a missing labelArray + res += " - " + expected[j].String() + "\n" + j++ + } + } + for i < len(ls) { + // obtained has an unexpected labelArray + res += " + " + ls[i].String() + "\n" + i++ + } + for j < len(expected) { + // expected has a missing labelArray + res += " - " + expected[j].String() + "\n" + j++ + } + + return res +} + +// GetModel returns the LabelArrayList as a [][]string. Each member LabelArray +// becomes a []string. +func (ls LabelArrayList) String() string { + res := "" + for _, v := range ls { + if res != "" { + res += ", " + } + res += v.String() + } + return res +} + // Sort sorts the LabelArrayList in-place, but also returns the sorted list // for convenience. The LabelArrays themselves must already be sorted. This is // true for all constructors of LabelArray. diff --git a/vendor/github.com/cilium/cilium/pkg/loadbalancer/loadbalancer.go b/vendor/github.com/cilium/cilium/pkg/loadbalancer/loadbalancer.go index 8f6554459f..1a07322388 100644 --- a/vendor/github.com/cilium/cilium/pkg/loadbalancer/loadbalancer.go +++ b/vendor/github.com/cilium/cilium/pkg/loadbalancer/loadbalancer.go @@ -79,13 +79,13 @@ const ( serviceFlagIntLocalScope = 1 << 12 serviceFlagTwoScopes = 1 << 13 serviceFlagQuarantined = 1 << 14 - serviceFlagFwdModeFlip = 1 << 15 + serviceFlagFwdModeDSR = 1 << 15 ) type SvcFlagParam struct { SvcType SVCType - SvcFwdModeFlip bool SvcNatPolicy SVCNatPolicy + SvcFwdModeDSR bool SvcExtLocal bool SvcIntLocal bool SessionAffinity bool @@ -147,8 +147,8 @@ func NewSvcFlag(p *SvcFlagParam) ServiceFlags { if p.Quarantined { flags |= serviceFlagQuarantined } - if p.SvcFwdModeFlip { - flags |= serviceFlagFwdModeFlip + if p.SvcFwdModeDSR { + flags |= serviceFlagFwdModeDSR } return flags @@ -253,8 +253,8 @@ func (s ServiceFlags) String() string { if s&serviceFlagQuarantined != 0 { str = append(str, "quarantined") } - if s&serviceFlagFwdModeFlip != 0 { - str = append(str, "flip") + if s&serviceFlagFwdModeDSR != 0 { + str = append(str, "dsr") } return strings.Join(str, ", ") } diff --git a/vendor/github.com/cilium/cilium/pkg/logging/slog.go b/vendor/github.com/cilium/cilium/pkg/logging/slog.go index ec95a7e6b6..ca062fe0f3 100644 --- a/vendor/github.com/cilium/cilium/pkg/logging/slog.go +++ b/vendor/github.com/cilium/cilium/pkg/logging/slog.go @@ -11,8 +11,13 @@ import ( "time" "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/pkg/logging/logfields" ) +// logrErrorKey is the key used by the logr library for the error parameter. +const logrErrorKey = "err" + // SlogNopHandler discards all logs. 
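Editor's note: the new Diff helper above is intended for debugging assertion failures on sorted LabelArrayLists; "+ " marks arrays present in the receiver but not expected, "- " marks expected arrays that are missing. A small sketch of how a test might use it; the helper name is illustrative:

package example // hypothetical

import (
	"testing"

	"github.com/cilium/cilium/pkg/labels"
)

// checkSelections fails the test with a readable diff when the obtained label
// arrays do not match the expected ones.
func checkSelections(t *testing.T, obtained, expected labels.LabelArrayList) {
	t.Helper()
	// Diff assumes both lists are already sorted.
	obtained.Sort()
	expected.Sort()
	if !obtained.Equals(expected) {
		t.Errorf("unexpected label arrays:\n%s", obtained.Diff(expected))
	}
}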
var SlogNopHandler slog.Handler = nopHandler{} @@ -26,7 +31,7 @@ func (n nopHandler) WithGroup(string) slog.Handler { return n } var slogHandlerOpts = &slog.HandlerOptions{ AddSource: false, Level: slog.LevelInfo, - ReplaceAttr: replaceLevelAndDropTime, + ReplaceAttr: replaceAttrFnWithoutTimestamp, } // Default slog logger. Will be overwritten once initializeSlog is called. @@ -59,9 +64,9 @@ func initializeSlog(logOpts LogOptions, useStdout bool) { logFormat := logOpts.GetLogFormat() switch logFormat { case LogFormatJSON, LogFormatText: - opts.ReplaceAttr = replaceLevelAndDropTime + opts.ReplaceAttr = replaceAttrFnWithoutTimestamp case LogFormatJSONTimestamp, LogFormatTextTimestamp: - opts.ReplaceAttr = replaceLevel + opts.ReplaceAttr = replaceAttrFn } writer := os.Stderr @@ -83,7 +88,7 @@ func initializeSlog(logOpts LogOptions, useStdout bool) { } } -func replaceLevel(groups []string, a slog.Attr) slog.Attr { +func replaceAttrFn(groups []string, a slog.Attr) slog.Attr { switch a.Key { case slog.TimeKey: // Adjust to timestamp format that logrus uses; except that we can't @@ -95,21 +100,22 @@ func replaceLevel(groups []string, a slog.Attr) slog.Attr { Key: a.Key, Value: slog.StringValue(strings.ToLower(a.Value.String())), } + case logrErrorKey: + // Uniform the attribute identifying the error + return slog.Attr{ + Key: logfields.Error, + Value: a.Value, + } } return a } -func replaceLevelAndDropTime(groups []string, a slog.Attr) slog.Attr { +func replaceAttrFnWithoutTimestamp(groups []string, a slog.Attr) slog.Attr { switch a.Key { case slog.TimeKey: // Drop timestamps return slog.Attr{} - case slog.LevelKey: - // Lower-case the log level - return slog.Attr{ - Key: a.Key, - Value: slog.StringValue(strings.ToLower(a.Value.String())), - } + default: + return replaceAttrFn(groups, a) } - return a } diff --git a/vendor/github.com/cilium/cilium/pkg/mac/mac_linux.go b/vendor/github.com/cilium/cilium/pkg/mac/mac_linux.go index 9e9dd69ea7..8f0d415e7b 100644 --- a/vendor/github.com/cilium/cilium/pkg/mac/mac_linux.go +++ b/vendor/github.com/cilium/cilium/pkg/mac/mac_linux.go @@ -8,11 +8,13 @@ import ( "net" "github.com/vishvananda/netlink" + + "github.com/cilium/cilium/pkg/datapath/linux/safenetlink" ) // HasMacAddr returns true if the given network interface has L2 addr. 
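Editor's note: the loadbalancer flag rename above replaces the ambiguous "flip" bit with an explicit DSR forwarding-mode bit. A quick sketch of how the flag is built and rendered, assuming the exported SVCTypeLoadBalancer constant; purely illustrative:

package example // hypothetical

import (
	"fmt"

	lb "github.com/cilium/cilium/pkg/loadbalancer"
)

func exampleSvcFlags() {
	flags := lb.NewSvcFlag(&lb.SvcFlagParam{
		SvcType:       lb.SVCTypeLoadBalancer,
		SvcFwdModeDSR: true,
	})
	// With the change above, String() now lists "dsr" instead of "flip".
	fmt.Println(flags.String())
}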
func HasMacAddr(iface string) bool { - link, err := netlink.LinkByName(iface) + link, err := safenetlink.LinkByName(iface) if err != nil { return false } @@ -26,7 +28,7 @@ func LinkHasMacAddr(link netlink.Link) bool { // ReplaceMacAddressWithLinkName replaces the MAC address of the given link func ReplaceMacAddressWithLinkName(ifName, macAddress string) error { - l, err := netlink.LinkByName(ifName) + l, err := safenetlink.LinkByName(ifName) if err != nil { if errors.As(err, &netlink.LinkNotFoundError{}) { return nil diff --git a/vendor/github.com/cilium/cilium/pkg/monitor/api/files.go b/vendor/github.com/cilium/cilium/pkg/monitor/api/files.go index c2057b761f..b314a0582d 100644 --- a/vendor/github.com/cilium/cilium/pkg/monitor/api/files.go +++ b/vendor/github.com/cilium/cilium/pkg/monitor/api/files.go @@ -33,6 +33,7 @@ var files = map[uint8]string{ 112: "encap.h", 113: "encrypt.h", 114: "host_firewall.h", + 115: "nodeport_egress.h", // @@ source files list end } diff --git a/vendor/github.com/cilium/cilium/pkg/node/address.go b/vendor/github.com/cilium/cilium/pkg/node/address.go index 74225b98ae..8bfeea9948 100644 --- a/vendor/github.com/cilium/cilium/pkg/node/address.go +++ b/vendor/github.com/cilium/cilium/pkg/node/address.go @@ -262,12 +262,6 @@ func SetRouterInfo(info RouterInfo) { addrs.mu.Unlock() } -// GetHostMasqueradeIPv4 returns the IPv4 address to be used for masquerading -// any traffic that is being forwarded from the host into the Cilium cluster. -func GetHostMasqueradeIPv4() net.IP { - return GetInternalIPv4Router() -} - // SetIPv4AllocRange sets the IPv4 address pool to use when allocating // addresses for local endpoints func SetIPv4AllocRange(net *cidr.CIDR) { @@ -320,12 +314,6 @@ func GetIPv6() net.IP { return clone(n.GetNodeIP(true)) } -// GetHostMasqueradeIPv6 returns the IPv6 address to be used for masquerading -// any traffic that is being forwarded from the host into the Cilium cluster. -func GetHostMasqueradeIPv6() net.IP { - return GetIPv6Router() -} - // GetIPv6Router returns the IPv6 address of the router, e.g. address // of cilium_host device. func GetIPv6Router() net.IP { diff --git a/vendor/github.com/cilium/cilium/pkg/node/address_linux.go b/vendor/github.com/cilium/cilium/pkg/node/address_linux.go index 0f244f0bc0..b35e3261b0 100644 --- a/vendor/github.com/cilium/cilium/pkg/node/address_linux.go +++ b/vendor/github.com/cilium/cilium/pkg/node/address_linux.go @@ -14,6 +14,7 @@ import ( "github.com/vishvananda/netlink" "golang.org/x/sys/unix" + "github.com/cilium/cilium/pkg/datapath/linux/safenetlink" "github.com/cilium/cilium/pkg/ip" ) @@ -31,7 +32,7 @@ func firstGlobalAddr(intf string, preferredIP net.IP, family int, preferPublic b } if intf != "" && intf != "undefined" { - link, err = netlink.LinkByName(intf) + link, err = safenetlink.LinkByName(intf) if err != nil { link = nil } else { @@ -40,7 +41,7 @@ func firstGlobalAddr(intf string, preferredIP net.IP, family int, preferPublic b } retryInterface: - addr, err := netlink.AddrList(link, family) + addr, err := safenetlink.AddrList(link, family) if err != nil { return nil, err } @@ -167,11 +168,11 @@ func firstGlobalV6Addr(intf string, preferredIP net.IP, preferPublic bool) (net. 
// getCiliumHostIPsFromNetDev returns the first IPv4 link local and returns // it func getCiliumHostIPsFromNetDev(devName string) (ipv4GW, ipv6Router net.IP) { - hostDev, err := netlink.LinkByName(devName) + hostDev, err := safenetlink.LinkByName(devName) if err != nil { return nil, nil } - addrs, err := netlink.AddrList(hostDev, netlink.FAMILY_ALL) + addrs, err := safenetlink.AddrList(hostDev, netlink.FAMILY_ALL) if err != nil { return nil, nil } diff --git a/vendor/github.com/cilium/cilium/pkg/node/ip_linux.go b/vendor/github.com/cilium/cilium/pkg/node/ip_linux.go index f2078f774a..615d1c586a 100644 --- a/vendor/github.com/cilium/cilium/pkg/node/ip_linux.go +++ b/vendor/github.com/cilium/cilium/pkg/node/ip_linux.go @@ -7,6 +7,8 @@ import ( "strings" "github.com/vishvananda/netlink" + + "github.com/cilium/cilium/pkg/datapath/linux/safenetlink" ) func init() { @@ -18,7 +20,7 @@ func initExcludedIPs() { prefixes := []string{ "docker", } - links, err := netlink.LinkList() + links, err := safenetlink.LinkList() if err != nil { return } @@ -48,7 +50,7 @@ func initExcludedIPs() { continue } } - addr, err := netlink.AddrList(l, netlink.FAMILY_ALL) + addr, err := safenetlink.AddrList(l, netlink.FAMILY_ALL) if err != nil { continue } diff --git a/vendor/github.com/cilium/cilium/pkg/option/config.go b/vendor/github.com/cilium/cilium/pkg/option/config.go index ce10af41be..9611e85212 100644 --- a/vendor/github.com/cilium/cilium/pkg/option/config.go +++ b/vendor/github.com/cilium/cilium/pkg/option/config.go @@ -9,7 +9,6 @@ import ( "encoding/json" "errors" "fmt" - "io" "math" "net" "net/netip" @@ -31,10 +30,8 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/spf13/viper" - "google.golang.org/protobuf/types/known/fieldmaskpb" k8sLabels "k8s.io/apimachinery/pkg/labels" - flowpb "github.com/cilium/cilium/api/v1/flow" "github.com/cilium/cilium/api/v1/models" "github.com/cilium/cilium/pkg/cidr" clustermeshTypes "github.com/cilium/cilium/pkg/clustermesh/types" @@ -549,6 +546,9 @@ const ( // AuthMapEntriesDefault defines the default auth map limit. AuthMapEntriesDefault = 1 << 19 + // BPFConntrackAccounting controls whether CT accounting for packets and bytes is enabled + BPFConntrackAccountingDefault = false + // AuthMapEntriesName configures max entries for BPF auth map. AuthMapEntriesName = "bpf-auth-map-max" @@ -796,9 +796,6 @@ const ( // or direct routing is used and the node CIDR and pod CIDR overlap. EncryptionStrictModeAllowRemoteNodeIdentities = "encryption-strict-mode-allow-remote-node-identities" - // EnableWireguardUserspaceFallback is the name of the option that enables the fallback to WireGuard userspace mode - EnableWireguardUserspaceFallback = "enable-wireguard-userspace-fallback" - // WireguardPersistentKeepalivee controls Wireguard PersistentKeepalive option. Set 0 to disable. WireguardPersistentKeepalive = "wireguard-persistent-keepalive" @@ -969,141 +966,6 @@ const ( // PolicyAccountingArg argument enable policy accounting. PolicyAccountingArg = "policy-accounting" - // EnableHubble enables hubble in the agent. - EnableHubble = "enable-hubble" - - // HubbleSocketPath specifies the UNIX domain socket for Hubble server to listen to. - HubbleSocketPath = "hubble-socket-path" - - // HubbleListenAddress specifies address for Hubble server to listen to. - HubbleListenAddress = "hubble-listen-address" - - // HubblePreferIpv6 controls whether IPv6 or IPv4 addresses should be preferred for - // communication to agents, if both are available. 
- HubblePreferIpv6 = "hubble-prefer-ipv6" - - // HubbleTLSDisabled allows the Hubble server to run on the given listen - // address without TLS. - HubbleTLSDisabled = "hubble-disable-tls" - - // HubbleTLSCertFile specifies the path to the public key file for the - // Hubble server. The file must contain PEM encoded data. - HubbleTLSCertFile = "hubble-tls-cert-file" - - // HubbleTLSKeyFile specifies the path to the private key file for the - // Hubble server. The file must contain PEM encoded data. - HubbleTLSKeyFile = "hubble-tls-key-file" - - // HubbleTLSClientCAFiles specifies the path to one or more client CA - // certificates to use for TLS with mutual authentication (mTLS). The files - // must contain PEM encoded data. - HubbleTLSClientCAFiles = "hubble-tls-client-ca-files" - - // HubbleEventBufferCapacity specifies the capacity of Hubble events buffer. - HubbleEventBufferCapacity = "hubble-event-buffer-capacity" - - // HubbleEventQueueSize specifies the buffer size of the channel to receive monitor events. - HubbleEventQueueSize = "hubble-event-queue-size" - - // HubbleMetricsServer specifies the addresses to serve Hubble metrics on. - HubbleMetricsServer = "hubble-metrics-server" - - // HubbleMetricsTLSEnabled allows the Hubble metrics server to run on the given listen - // address with TLS. - HubbleMetricsTLSEnabled = "hubble-metrics-server-enable-tls" - - // HubbleMetricsServerTLSCertFile specifies the path to the public key file for the - // Hubble metrics server. The file must contain PEM encoded data. - HubbleMetricsTLSCertFile = "hubble-metrics-server-tls-cert-file" - - // HubbleMetricsServerTLSKeyFile specifies the path to the private key file for the - // Hubble metrics server. The file must contain PEM encoded data. - HubbleMetricsTLSKeyFile = "hubble-metrics-server-tls-key-file" - - // HubbleMetricsServerTLSClientCAFiles specifies the path to one or more client CA - // certificates to use for TLS with mutual authentication (mTLS) on the Hubble metrics server. - // The files must contain PEM encoded data. - HubbleMetricsTLSClientCAFiles = "hubble-metrics-server-tls-client-ca-files" - - // HubbleMetrics specifies enabled metrics and their configuration options. - HubbleMetrics = "hubble-metrics" - - // HubbleFlowlogsConfigFilePath specifies the filepath with configuration of hubble flowlogs. - // e.g. "/etc/cilium/flowlog.yaml" - HubbleFlowlogsConfigFilePath = "hubble-flowlogs-config-path" - - // HubbleExportFilePath specifies the filepath to write Hubble events to. - // e.g. "/var/run/cilium/hubble/events.log" - HubbleExportFilePath = "hubble-export-file-path" - - // HubbleExportFileMaxSizeMB specifies the file size in MB at which to rotate - // the Hubble export file. - HubbleExportFileMaxSizeMB = "hubble-export-file-max-size-mb" - - // HubbleExportFileMaxBacks specifies the number of rotated files to keep. - HubbleExportFileMaxBackups = "hubble-export-file-max-backups" - - // HubbleExportFileCompress specifies whether rotated files are compressed. - HubbleExportFileCompress = "hubble-export-file-compress" - - // HubbleExportAllowlist specifies allow list filter use by exporter. - HubbleExportAllowlist = "hubble-export-allowlist" - - // HubbleExportDenylist specifies deny list filter use by exporter. - HubbleExportDenylist = "hubble-export-denylist" - - // HubbleExportFieldmask specifies list of fields to log in exporter. 
- HubbleExportFieldmask = "hubble-export-fieldmask" - - // EnableHubbleRecorderAPI specifies if the Hubble Recorder API should be served - EnableHubbleRecorderAPI = "enable-hubble-recorder-api" - - // EnableHubbleOpenMetrics enables exporting hubble metrics in OpenMetrics format. - EnableHubbleOpenMetrics = "enable-hubble-open-metrics" - - // HubbleRecorderStoragePath specifies the directory in which pcap files - // created via the Hubble Recorder API are stored - HubbleRecorderStoragePath = "hubble-recorder-storage-path" - - // HubbleRecorderSinkQueueSize is the queue size for each recorder sink - HubbleRecorderSinkQueueSize = "hubble-recorder-sink-queue-size" - - // HubbleSkipUnknownCGroupIDs specifies if events with unknown cgroup ids should be skipped - HubbleSkipUnknownCGroupIDs = "hubble-skip-unknown-cgroup-ids" - - // HubbleMonitorEvents specifies Cilium monitor events for Hubble to observe. - // By default, Hubble observes all monitor events. - HubbleMonitorEvents = "hubble-monitor-events" - - // HubbleRedactEnabled controls if sensitive information will be redacted from L7 flows - HubbleRedactEnabled = "hubble-redact-enabled" - - // HubbleRedactHttpURLQuery controls if the URL query will be redacted from flows - HubbleRedactHttpURLQuery = "hubble-redact-http-urlquery" - - // HubbleRedactHttpUserInfo controls if the user info will be redacted from flows - HubbleRedactHttpUserInfo = "hubble-redact-http-userinfo" - - // HubbleRedactKafkaApiKey controls if the Kafka API key will be redacted from flows - HubbleRedactKafkaApiKey = "hubble-redact-kafka-apikey" - - // HubbleRedactHttpHeadersAllow controls which http headers will not be redacted from flows - HubbleRedactHttpHeadersAllow = "hubble-redact-http-headers-allow" - - // HubbleRedactHttpHeadersDeny controls which http headers will be redacted from flows - HubbleRedactHttpHeadersDeny = "hubble-redact-http-headers-deny" - - // HubbleDropEvents controls whether Hubble should create v1.Events - // for packet drops related to pods - HubbleDropEvents = "hubble-drop-events" - - // HubbleDropEventsInterval controls the minimum time between emitting events - // with the same source and destination IP - HubbleDropEventsInterval = "hubble-drop-events-interval" - - // HubbleDropEventsReasons controls which drop reasons to emit events for - HubbleDropEventsReasons = "hubble-drop-events-reasons" - // K8sClientConnectionTimeout configures the timeout for K8s client connections. K8sClientConnectionTimeout = "k8s-client-connection-timeout" @@ -1139,6 +1001,9 @@ const ( // LBAffinityMapMaxEntries configures max entries of bpf map for session affinity. LBAffinityMapMaxEntries = "bpf-lb-affinity-map-max" + // LBSourceRangeAllTypes configures service source ranges for all service types. + LBSourceRangeAllTypes = "bpf-lb-source-range-all-types" + // LBSourceRangeMapMaxEntries configures max entries of bpf map for service source ranges. LBSourceRangeMapMaxEntries = "bpf-lb-source-range-map-max" @@ -1237,6 +1102,13 @@ const ( // EnableK8sNetworkPolicy enables support for K8s NetworkPolicy. EnableK8sNetworkPolicy = "enable-k8s-networkpolicy" + // EnableCiliumNetworkPolicy enables support for Cilium Network Policy. + EnableCiliumNetworkPolicy = "enable-cilium-network-policy" + + // EnableCiliumClusterwideNetworkPolicy enables support for Cilium Clusterwide + // Network Policy. 
+ EnableCiliumClusterwideNetworkPolicy = "enable-cilium-clusterwide-network-policy" + // PolicyCIDRMatchMode defines the entities that CIDR selectors can reach PolicyCIDRMatchMode = "policy-cidr-match-mode" @@ -1256,8 +1128,8 @@ const ( // BPFEventsTraceEnabled defines the TraceNotification setting for any endpoint BPFEventsTraceEnabled = "bpf-events-trace-enabled" - // BPFConntrackAccountingEnabled controls whether CT accounting for packets and bytes is enabled - BPFConntrackAccountingEnabled = "bpf-conntrack-accounting-enabled" + // BPFConntrackAccounting controls whether CT accounting for packets and bytes is enabled + BPFConntrackAccounting = "bpf-conntrack-accounting" // EnableInternalTrafficPolicy enables handling routing for services with internalTrafficPolicy configured EnableInternalTrafficPolicy = "enable-internal-traffic-policy" @@ -1314,6 +1186,9 @@ const ( // EnableExternalWorkloads enables the support for external workloads. EnableExternalWorkloads = "enable-external-workloads" + + // EnableSourceIPVerification enables the source ip verification, defaults to true + EnableSourceIPVerification = "enable-source-ip-verification" ) const ( @@ -1696,9 +1571,6 @@ type DaemonConfig struct { // or direct routing is used and the node CIDR and pod CIDR overlap. EncryptionStrictModeAllowRemoteNodeIdentities bool - // EnableWireguardUserspaceFallback enables the fallback to the userspace implementation - EnableWireguardUserspaceFallback bool - // WireguardPersistentKeepalive controls Wireguard PersistentKeepalive option. WireguardPersistentKeepalive time.Duration @@ -2147,141 +2019,6 @@ type DaemonConfig struct { // PolicyAccounting enable policy accounting PolicyAccounting bool - // EnableHubble specifies whether to enable the hubble server. - EnableHubble bool - - // HubbleSocketPath specifies the UNIX domain socket for Hubble server to listen to. - HubbleSocketPath string - - // HubbleListenAddress specifies address for Hubble to listen to. - HubbleListenAddress string - - // HubblePreferIpv6 controls whether IPv6 or IPv4 addresses should be preferred for - // communication to agents, if both are available. - HubblePreferIpv6 bool - - // HubbleTLSDisabled allows the Hubble server to run on the given listen - // address without TLS. - HubbleTLSDisabled bool - - // HubbleTLSCertFile specifies the path to the public key file for the - // Hubble server. The file must contain PEM encoded data. - HubbleTLSCertFile string - - // HubbleTLSKeyFile specifies the path to the private key file for the - // Hubble server. The file must contain PEM encoded data. - HubbleTLSKeyFile string - - // HubbleTLSClientCAFiles specifies the path to one or more client CA - // certificates to use for TLS with mutual authentication (mTLS). The files - // must contain PEM encoded data. - HubbleTLSClientCAFiles []string - - // HubbleEventBufferCapacity specifies the capacity of Hubble events buffer. - HubbleEventBufferCapacity int - - // HubbleEventQueueSize specifies the buffer size of the channel to receive monitor events. - HubbleEventQueueSize int - - // HubbleMetricsServer specifies the addresses to serve Hubble metrics on. - HubbleMetricsServer string - - // HubbleMetricsServerTLSEnabled allows the Hubble metrics server to run on the given listen - // address with TLS. - HubbleMetricsServerTLSEnabled bool - - // HubbleMetricsServerTLSCertFile specifies the path to the public key file for the - // Hubble server. The file must contain PEM encoded data. 
- HubbleMetricsServerTLSCertFile string - - // HubbleMetricsServerTLSKeyFile specifies the path to the private key file for the - // Hubble server. The file must contain PEM encoded data. - HubbleMetricsServerTLSKeyFile string - - // HubbleMetricsServerTLSClientCAFiles specifies the path to one or more client CA - // certificates to use for TLS with mutual authentication (mTLS). The files - // must contain PEM encoded data. - HubbleMetricsServerTLSClientCAFiles []string - - // HubbleMetrics specifies enabled metrics and their configuration options. - HubbleMetrics []string - - // HubbleFlowlogsConfigFilePath specifies the filepath with configuration of hubble flowlogs. - // e.g. "/etc/cilium/flowlog.yaml" - HubbleFlowlogsConfigFilePath string - - // HubbleExportFilePath specifies the filepath to write Hubble events to. - // e.g. "/var/run/cilium/hubble/events.log" - HubbleExportFilePath string - - // HubbleExportFileMaxSizeMB specifies the file size in MB at which to rotate - // the Hubble export file. - HubbleExportFileMaxSizeMB int - - // HubbleExportFileMaxBacks specifies the number of rotated files to keep. - HubbleExportFileMaxBackups int - - // HubbleExportFileCompress specifies whether rotated files are compressed. - HubbleExportFileCompress bool - - // HubbleExportAllowlist specifies allow list filter use by exporter. - HubbleExportAllowlist []*flowpb.FlowFilter - - // HubbleExportDenylist specifies deny list filter use by exporter. - HubbleExportDenylist []*flowpb.FlowFilter - - // HubbleExportFieldmask specifies list of fields to log in exporter. - HubbleExportFieldmask []string - - // EnableHubbleRecorderAPI specifies if the Hubble Recorder API should be served - EnableHubbleRecorderAPI bool - - // EnableHubbleOpenMetrics enables exporting hubble metrics in OpenMetrics format. - EnableHubbleOpenMetrics bool - - // HubbleRecorderStoragePath specifies the directory in which pcap files - // created via the Hubble Recorder API are stored - HubbleRecorderStoragePath string - - // HubbleRecorderSinkQueueSize is the queue size for each recorder sink - HubbleRecorderSinkQueueSize int - - // HubbleSkipUnknownCGroupIDs specifies if events with unknown cgroup ids should be skipped - HubbleSkipUnknownCGroupIDs bool - - // HubbleMonitorEvents specifies Cilium monitor events for Hubble to observe. - // By default, Hubble observes all monitor events. 
- HubbleMonitorEvents []string - - // HubbleRedactEnabled controls if Hubble will be redacting sensitive information from L7 flows - HubbleRedactEnabled bool - - // HubbleRedactURLQuery controls if the URL query will be redacted from flows - HubbleRedactHttpURLQuery bool - - // HubbleRedactUserInfo controls if the user info will be redacted from flows - HubbleRedactHttpUserInfo bool - - // HubbleRedactKafkaApiKey controls if Kafka API key will be redacted from flows - HubbleRedactKafkaApiKey bool - - // HubbleRedactHttpHeadersAllow controls which http headers will not be redacted from flows - HubbleRedactHttpHeadersAllow []string - - // HubbleRedactHttpHeadersDeny controls which http headers will be redacted from flows - HubbleRedactHttpHeadersDeny []string - - // HubbleDropEvents controls whether Hubble should create v1.Events - // for packet drops related to pods - HubbleDropEvents bool - - // HubbleDropEventsInterval controls the minimum time between emitting events - // with the same source and destination IP - HubbleDropEventsInterval time.Duration - - // HubbleDropEventsReasons controls which drop reasons to emit events for - HubbleDropEventsReasons []string - // EnableIPv4FragmentsTracking enables IPv4 fragments tracking for // L4-based lookups. Needs LRU map support. EnableIPv4FragmentsTracking bool @@ -2328,6 +2065,10 @@ type DaemonConfig struct { // LBAffinityMapEntries is the maximum number of entries allowed in BPF lbmap for session affinities. LBAffinityMapEntries int + // LBSourceRangeAllTypes enables propagation of loadbalancerSourceRanges to all Kubernetes + // service types which were created from the LoadBalancer service. + LBSourceRangeAllTypes bool + // LBSourceRangeMapEntries is the maximum number of entries allowed in BPF lbmap for source ranges. LBSourceRangeMapEntries int @@ -2443,8 +2184,8 @@ type DaemonConfig struct { // BPFEventsTraceEnabled controls whether the Cilium datapath exposes "trace" events to Cilium monitor and Hubble. BPFEventsTraceEnabled bool - // BPFConntrackAccountingEnabled controls whether CT accounting for packets and bytes is enabled. - BPFConntrackAccountingEnabled bool + // BPFConntrackAccounting controls whether CT accounting for packets and bytes is enabled. + BPFConntrackAccounting bool // IPAMCiliumNodeUpdateRate is the maximum rate at which the CiliumNode custom // resource is updated. @@ -2453,6 +2194,13 @@ type DaemonConfig struct { // EnableK8sNetworkPolicy enables support for K8s NetworkPolicy. EnableK8sNetworkPolicy bool + // EnableCiliumNetworkPolicy enables support for Cilium Network Policy. + EnableCiliumNetworkPolicy bool + + // EnableCiliumClusterwideNetworkPolicy enables support for Cilium Clusterwide + // Network Policy. + EnableCiliumClusterwideNetworkPolicy bool + // PolicyCIDRMatchMode is the list of entities that can be selected by CIDR policy. 
// Currently supported values: // - world @@ -2488,6 +2236,9 @@ type DaemonConfig struct { // EnableNonDefaultDenyPolicies allows policies to define whether they are operating in default-deny mode EnableNonDefaultDenyPolicies bool + + // EnableSourceIPVerification enables the source ip validation of connection from endpoints to endpoints + EnableSourceIPVerification bool } var ( @@ -2531,21 +2282,25 @@ var ( K8sEnableLeasesFallbackDiscovery: defaults.K8sEnableLeasesFallbackDiscovery, - ExternalClusterIP: defaults.ExternalClusterIP, - EnableVTEP: defaults.EnableVTEP, - EnableBGPControlPlane: defaults.EnableBGPControlPlane, - EnableK8sNetworkPolicy: defaults.EnableK8sNetworkPolicy, - PolicyCIDRMatchMode: defaults.PolicyCIDRMatchMode, - MaxConnectedClusters: defaults.MaxConnectedClusters, + ExternalClusterIP: defaults.ExternalClusterIP, + EnableVTEP: defaults.EnableVTEP, + EnableBGPControlPlane: defaults.EnableBGPControlPlane, + EnableK8sNetworkPolicy: defaults.EnableK8sNetworkPolicy, + EnableCiliumNetworkPolicy: defaults.EnableCiliumNetworkPolicy, + EnableCiliumClusterwideNetworkPolicy: defaults.EnableCiliumClusterwideNetworkPolicy, + PolicyCIDRMatchMode: defaults.PolicyCIDRMatchMode, + MaxConnectedClusters: defaults.MaxConnectedClusters, BPFEventsDropEnabled: defaults.BPFEventsDropEnabled, BPFEventsPolicyVerdictEnabled: defaults.BPFEventsPolicyVerdictEnabled, BPFEventsTraceEnabled: defaults.BPFEventsTraceEnabled, - BPFConntrackAccountingEnabled: defaults.BPFConntrackAccountingEnabled, + BPFConntrackAccounting: defaults.BPFConntrackAccounting, EnableEnvoyConfig: defaults.EnableEnvoyConfig, EnableInternalTrafficPolicy: defaults.EnableInternalTrafficPolicy, EnableNonDefaultDenyPolicies: defaults.EnableNonDefaultDenyPolicies, + + EnableSourceIPVerification: defaults.EnableSourceIPVerification, } ) @@ -2669,6 +2424,11 @@ func (c *DaemonConfig) IPv6Enabled() bool { return c.EnableIPv6 } +// LBProtoDiffEnabled returns true if LoadBalancerProtocolDifferentiation is enabled +func (c *DaemonConfig) LBProtoDiffEnabled() bool { + return c.LoadBalancerProtocolDifferentiation +} + // IPv6NDPEnabled returns true if IPv6 NDP support is enabled func (c *DaemonConfig) IPv6NDPEnabled() bool { return c.EnableIPv6NDP @@ -2798,13 +2558,6 @@ func (c *DaemonConfig) validateIPv6NAT46x64CIDR() error { return nil } -func (c *DaemonConfig) validateHubbleRedact() error { - if len(c.HubbleRedactHttpHeadersAllow) > 0 && len(c.HubbleRedactHttpHeadersDeny) > 0 { - return fmt.Errorf("Only one of --hubble-redact-http-headers-allow and --hubble-redact-http-headers-deny can be specified, not both") - } - return nil -} - func (c *DaemonConfig) validateContainerIPLocalReservedPorts() error { if c.ContainerIPLocalReservedPorts == "" || c.ContainerIPLocalReservedPorts == defaults.ContainerIPLocalReservedPortsAuto { return nil @@ -2829,10 +2582,6 @@ func (c *DaemonConfig) Validate(vp *viper.Viper) error { c.IPv6NAT46x64CIDR, err) } - if err := c.validateHubbleRedact(); err != nil { - return err - } - if c.MTU < 0 { return fmt.Errorf("MTU '%d' cannot be negative", c.MTU) } @@ -2869,7 +2618,7 @@ func (c *DaemonConfig) Validate(vp *viper.Viper) error { if err := cinfo.InitClusterIDMax(); err != nil { return err } - if err := cinfo.Validate(log); err != nil { + if err := cinfo.Validate(); err != nil { return err } @@ -3002,7 +2751,30 @@ func (c *DaemonConfig) parseExcludedLocalAddresses(s []string) error { return nil } -// Populate sets all options with the values from viper +// SetupLogging sets all logging-related options with the 
values from viper, +// then setup logging based on these options and the given tag. +// +// This allows initializing logging as early as possible, then log entries +// produced below in Populate can honor the requested logging configurations. +func (c *DaemonConfig) SetupLogging(vp *viper.Viper, tag string) { + c.Debug = vp.GetBool(DebugArg) + c.LogDriver = vp.GetStringSlice(LogDriver) + + if m, err := command.GetStringMapStringE(vp, LogOpt); err != nil { + log.Fatalf("unable to parse %s: %s", LogOpt, err) + } else { + c.LogOpt = m + } + + if err := logging.SetupLogging(c.LogDriver, logging.LogOptions(c.LogOpt), tag, c.Debug); err != nil { + log.Fatal(err) + } +} + +// Populate sets all non-logging options with the values from viper. +// +// This function may emit logs. Consider calling SetupLogging before this +// to make sure that they honor logging-related options. func (c *DaemonConfig) Populate(vp *viper.Viper) { var err error @@ -3022,7 +2794,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.ClusterName = vp.GetString(clustermeshTypes.OptClusterName) c.MaxConnectedClusters = vp.GetUint32(clustermeshTypes.OptMaxConnectedClusters) c.DatapathMode = vp.GetString(DatapathMode) - c.Debug = vp.GetBool(DebugArg) c.DebugVerbose = vp.GetStringSlice(DebugVerbose) c.EnableIPv4 = vp.GetBool(EnableIPv4Name) c.EnableIPv6 = vp.GetBool(EnableIPv6Name) @@ -3037,7 +2808,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.L2AnnouncerLeaseDuration = vp.GetDuration(L2AnnouncerLeaseDuration) c.L2AnnouncerRenewDeadline = vp.GetDuration(L2AnnouncerRenewDeadline) c.L2AnnouncerRetryPeriod = vp.GetDuration(L2AnnouncerRetryPeriod) - c.EnableWireguardUserspaceFallback = vp.GetBool(EnableWireguardUserspaceFallback) c.WireguardPersistentKeepalive = vp.GetDuration(WireguardPersistentKeepalive) c.EnableWellKnownIdentities = vp.GetBool(EnableWellKnownIdentities) c.EnableXDPPrefilter = vp.GetBool(EnableXDPPrefilter) @@ -3110,7 +2880,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.LabelPrefixFile = vp.GetString(LabelPrefixFile) c.Labels = vp.GetStringSlice(Labels) c.LibDir = vp.GetString(LibDir) - c.LogDriver = vp.GetStringSlice(LogDriver) c.LogSystemLoadConfig = vp.GetBool(LogSystemLoadConfigName) c.LoopbackIPv4 = vp.GetString(LoopbackIPv4) c.LocalRouterIPv4 = vp.GetString(LocalRouterIPv4) @@ -3173,8 +2942,9 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.BPFEventsDropEnabled = vp.GetBool(BPFEventsDropEnabled) c.BPFEventsPolicyVerdictEnabled = vp.GetBool(BPFEventsPolicyVerdictEnabled) c.BPFEventsTraceEnabled = vp.GetBool(BPFEventsTraceEnabled) - c.BPFConntrackAccountingEnabled = vp.GetBool(BPFConntrackAccountingEnabled) + c.BPFConntrackAccounting = vp.GetBool(BPFConntrackAccounting) c.EnableIPSecEncryptedOverlay = vp.GetBool(EnableIPSecEncryptedOverlay) + c.LBSourceRangeAllTypes = vp.GetBool(LBSourceRangeAllTypes) c.ServiceNoBackendResponse = vp.GetString(ServiceNoBackendResponse) switch c.ServiceNoBackendResponse { @@ -3397,12 +3167,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.KVStoreOpt = m } - if m, err := command.GetStringMapStringE(vp, LogOpt); err != nil { - log.Fatalf("unable to parse %s: %s", LogOpt, err) - } else { - c.LogOpt = m - } - bpfEventsDefaultRateLimit := vp.GetUint32(BPFEventsDefaultRateLimit) bpfEventsDefaultBurstLimit := vp.GetUint32(BPFEventsDefaultBurstLimit) switch { @@ -3483,82 +3247,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { } c.KubeProxyReplacementHealthzBindAddr = vp.GetString(KubeProxyReplacementHealthzBindAddr) - // Hubble options. 
- c.EnableHubble = vp.GetBool(EnableHubble) - c.EnableHubbleOpenMetrics = vp.GetBool(EnableHubbleOpenMetrics) - c.HubbleSocketPath = vp.GetString(HubbleSocketPath) - c.HubbleListenAddress = vp.GetString(HubbleListenAddress) - c.HubblePreferIpv6 = vp.GetBool(HubblePreferIpv6) - c.HubbleTLSDisabled = vp.GetBool(HubbleTLSDisabled) - c.HubbleTLSCertFile = vp.GetString(HubbleTLSCertFile) - c.HubbleTLSKeyFile = vp.GetString(HubbleTLSKeyFile) - c.HubbleTLSClientCAFiles = vp.GetStringSlice(HubbleTLSClientCAFiles) - c.HubbleEventBufferCapacity = vp.GetInt(HubbleEventBufferCapacity) - c.HubbleEventQueueSize = vp.GetInt(HubbleEventQueueSize) - if c.HubbleEventQueueSize == 0 { - c.HubbleEventQueueSize = getDefaultMonitorQueueSize(runtime.NumCPU()) - } - c.HubbleMetricsServer = vp.GetString(HubbleMetricsServer) - c.HubbleMetricsServerTLSEnabled = vp.GetBool(HubbleMetricsTLSEnabled) - c.HubbleMetricsServerTLSCertFile = vp.GetString(HubbleMetricsTLSCertFile) - c.HubbleMetricsServerTLSKeyFile = vp.GetString(HubbleMetricsTLSKeyFile) - c.HubbleMetricsServerTLSClientCAFiles = vp.GetStringSlice(HubbleMetricsTLSClientCAFiles) - c.HubbleMetrics = vp.GetStringSlice(HubbleMetrics) - - c.HubbleExportFilePath = vp.GetString(HubbleExportFilePath) - c.HubbleExportFileMaxSizeMB = vp.GetInt(HubbleExportFileMaxSizeMB) - c.HubbleExportFileMaxBackups = vp.GetInt(HubbleExportFileMaxBackups) - c.HubbleExportFileCompress = vp.GetBool(HubbleExportFileCompress) - - for _, enc := range vp.GetStringSlice(HubbleExportAllowlist) { - dec := json.NewDecoder(strings.NewReader(enc)) - var result flowpb.FlowFilter - if err := dec.Decode(&result); err != nil { - if errors.Is(err, io.EOF) { - break - } - log.Fatalf("failed to decode hubble-export-allowlist '%v': %s", enc, err) - } - c.HubbleExportAllowlist = append(c.HubbleExportAllowlist, &result) - } - - for _, enc := range vp.GetStringSlice(HubbleExportDenylist) { - dec := json.NewDecoder(strings.NewReader(enc)) - var result flowpb.FlowFilter - if err := dec.Decode(&result); err != nil { - if errors.Is(err, io.EOF) { - break - } - log.Fatalf("failed to decode hubble-export-denylist '%v': %s", enc, err) - } - c.HubbleExportDenylist = append(c.HubbleExportDenylist, &result) - } - - if fm := vp.GetStringSlice(HubbleExportFieldmask); len(fm) > 0 { - _, err := fieldmaskpb.New(&flowpb.Flow{}, fm...) 
- if err != nil { - log.Fatalf("hubble-export-fieldmask contains invalid fieldmask '%v': %s", fm, err) - } - c.HubbleExportFieldmask = vp.GetStringSlice(HubbleExportFieldmask) - } - - c.HubbleFlowlogsConfigFilePath = vp.GetString(HubbleFlowlogsConfigFilePath) - - c.EnableHubbleRecorderAPI = vp.GetBool(EnableHubbleRecorderAPI) - c.HubbleRecorderStoragePath = vp.GetString(HubbleRecorderStoragePath) - c.HubbleRecorderSinkQueueSize = vp.GetInt(HubbleRecorderSinkQueueSize) - c.HubbleSkipUnknownCGroupIDs = vp.GetBool(HubbleSkipUnknownCGroupIDs) - c.HubbleMonitorEvents = vp.GetStringSlice(HubbleMonitorEvents) - c.HubbleRedactEnabled = vp.GetBool(HubbleRedactEnabled) - c.HubbleRedactHttpURLQuery = vp.GetBool(HubbleRedactHttpURLQuery) - c.HubbleRedactHttpUserInfo = vp.GetBool(HubbleRedactHttpUserInfo) - c.HubbleRedactKafkaApiKey = vp.GetBool(HubbleRedactKafkaApiKey) - c.HubbleRedactHttpHeadersAllow = vp.GetStringSlice(HubbleRedactHttpHeadersAllow) - c.HubbleRedactHttpHeadersDeny = vp.GetStringSlice(HubbleRedactHttpHeadersDeny) - c.HubbleDropEvents = vp.GetBool(HubbleDropEvents) - c.HubbleDropEventsInterval = vp.GetDuration(HubbleDropEventsInterval) - c.HubbleDropEventsReasons = vp.GetStringSlice(HubbleDropEventsReasons) - // Hidden options c.CompilerFlags = vp.GetStringSlice(CompilerFlags) c.ConfigFile = vp.GetString(ConfigFile) @@ -3585,6 +3273,9 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.EnableNodeSelectorLabels = vp.GetBool(EnableNodeSelectorLabels) c.NodeLabels = vp.GetStringSlice(NodeLabels) + c.EnableCiliumNetworkPolicy = vp.GetBool(EnableCiliumNetworkPolicy) + c.EnableCiliumClusterwideNetworkPolicy = vp.GetBool(EnableCiliumClusterwideNetworkPolicy) + // Parse node label patterns nodeLabelPatterns := vp.GetStringSlice(ExcludeNodeLabelPatterns) for _, pattern := range nodeLabelPatterns { @@ -3603,6 +3294,8 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.LoadBalancerProtocolDifferentiation = vp.GetBool(LoadBalancerProtocolDifferentiation) c.EnableInternalTrafficPolicy = vp.GetBool(EnableInternalTrafficPolicy) + + c.EnableSourceIPVerification = vp.GetBool(EnableSourceIPVerification) } func (c *DaemonConfig) populateLoadBalancerSettings(vp *viper.Viper) { @@ -4327,14 +4020,6 @@ func InitConfig(cmd *cobra.Command, programName, configName string, vp *viper.Vi } } -func getDefaultMonitorQueueSize(numCPU int) int { - monitorQueueSize := numCPU * defaults.MonitorQueueSizePerCPU - if monitorQueueSize > defaults.MonitorQueueSizePerCPUMaximum { - monitorQueueSize = defaults.MonitorQueueSizePerCPUMaximum - } - return monitorQueueSize -} - // BPFEventBufferConfig contains parsed configuration for a bpf map event buffer. 
type BPFEventBufferConfig struct { Enabled bool diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/cidr.go b/vendor/github.com/cilium/cilium/pkg/policy/api/cidr.go index 68e900aaea..f377c41c5a 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/cidr.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/cidr.go @@ -4,11 +4,10 @@ package api import ( - "net" "net/netip" "strings" - "github.com/cilium/cilium/pkg/ip" + slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" "github.com/cilium/cilium/pkg/labels" "github.com/cilium/cilium/pkg/option" ) @@ -22,6 +21,10 @@ type CIDR string var ( ipv4All = CIDR("0.0.0.0/0") ipv6All = CIDR("::/0") + + worldLabelNonDualStack = labels.Label{Source: labels.LabelSourceReserved, Key: labels.IDNameWorld} + worldLabelV4 = labels.Label{Source: labels.LabelSourceReserved, Key: labels.IDNameWorldIPv4} + worldLabelV6 = labels.Label{Source: labels.LabelSourceReserved, Key: labels.IDNameWorldIPv6} ) // CIDRRule is a rule that specifies a CIDR prefix to/from which outside @@ -136,52 +139,76 @@ type CIDRRuleSlice []CIDRRule // GetAsEndpointSelectors returns the provided CIDRRule slice as a slice of // endpoint selectors +// +// The ExceptCIDRs block is inserted as a negative match. Specifically, the +// DoesNotExist qualifier. For example, the CIDRRule +// +// cidr: 1.1.1.0/24 +// exceptCIDRs: ["1.1.1.1/32"] +// +// results in the selector equivalent to "cidr:1.1.1.0/24 !cidr:1.1.1.1/32". +// +// This works because the label selectors will select numeric identities belonging only +// to the shorter prefixes. However, longer prefixes will have a different numeric +// identity, as the bpf ipcache is an LPM lookup. This essentially acts as a +// "carve-out", using the LPM mechanism to exclude subsets of a larger prefix. func (s CIDRRuleSlice) GetAsEndpointSelectors() EndpointSelectorSlice { - cidrs := ComputeResultantCIDRSet(s) - ces := cidrs.GetAsEndpointSelectors() + ces := make(EndpointSelectorSlice, 0, len(s)) - // expand cidrgroup selectors for _, r := range s { - if r.CIDRGroupRef != "" { - ces = append(ces, NewESFromLabels(LabelForCIDRGroupRef(string(r.CIDRGroupRef)))) + ls := slim_metav1.LabelSelector{ + MatchExpressions: make([]slim_metav1.LabelSelectorRequirement, 0, 1+len(r.ExceptCIDRs)), } - } - - return ces -} - -// StringSlice returns the CIDRRuleSlice as a slice of strings. -func (s CIDRRuleSlice) StringSlice() []string { - result := make([]string, 0, len(s)) - for _, c := range s { - result = append(result, c.String()) - } - return result -} -// ComputeResultantCIDRSet converts a slice of CIDRRules into a slice of -// individual CIDRs. This expands the cidr defined by each CIDRRule, applies -// the CIDR exceptions defined in "ExceptCIDRs", and forms a minimal set of -// CIDRs that cover all of the CIDRRules. -// -// Assumes no error checking is necessary as CIDRRule.Sanitize already does this.
-func ComputeResultantCIDRSet(cidrs CIDRRuleSlice) CIDRSlice { - var allResultantAllowedCIDRs CIDRSlice - for _, s := range cidrs { - _, allowNet, _ := net.ParseCIDR(string(s.Cidr)) - - var removeSubnets []*net.IPNet - for _, t := range s.ExceptCIDRs { - _, removeSubnet, _ := net.ParseCIDR(string(t)) - removeSubnets = append(removeSubnets, removeSubnet) + // add the "main" label: + // either a CIDR or CIDRGroupRef + if r.Cidr != "" { + var lbl labels.Label + switch r.Cidr { + case ipv4All: + if option.Config.IsDualStack() { + lbl = worldLabelV4 + } else { + lbl = worldLabelNonDualStack + } + case ipv6All: + if option.Config.IsDualStack() { + lbl = worldLabelV6 + } else { + lbl = worldLabelNonDualStack + } + default: + lbl, _ = labels.IPStringToLabel(string(r.Cidr)) + } + ls.MatchExpressions = append(ls.MatchExpressions, slim_metav1.LabelSelectorRequirement{ + Key: lbl.GetExtendedKey(), + Operator: slim_metav1.LabelSelectorOpExists, + }) + } else if r.CIDRGroupRef != "" { + lbl := LabelForCIDRGroupRef(string(r.CIDRGroupRef)) + ls.MatchExpressions = append(ls.MatchExpressions, slim_metav1.LabelSelectorRequirement{ + Key: lbl.GetExtendedKey(), + Operator: slim_metav1.LabelSelectorOpExists, + }) + } else { + // should never be hit, but paranoia + continue } - resultantAllowedCIDRs := ip.RemoveCIDRs([]*net.IPNet{allowNet}, removeSubnets) - for _, u := range resultantAllowedCIDRs { - allResultantAllowedCIDRs = append(allResultantAllowedCIDRs, CIDR(u.String())) + // exclude any excepted CIDRs. + // Do so by inserting a "DoesNotExist" requirement for the given prefix key + for _, exceptCIDR := range r.ExceptCIDRs { + lbl, _ := labels.IPStringToLabel(string(exceptCIDR)) + ls.MatchExpressions = append(ls.MatchExpressions, slim_metav1.LabelSelectorRequirement{ + Key: lbl.GetExtendedKey(), + Operator: slim_metav1.LabelSelectorOpDoesNotExist, + }) } + + ces = append(ces, NewESFromK8sLabelSelector("", &ls)) } - return allResultantAllowedCIDRs + + return ces } // addrsToCIDRRules generates CIDRRules for the IPs passed in. diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/rule_validation.go b/vendor/github.com/cilium/cilium/pkg/policy/api/rule_validation.go index 9c78f93faa..86cb890b20 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/rule_validation.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/rule_validation.go @@ -28,6 +28,9 @@ var ( // Sanitize validates and sanitizes a policy rule. Minor edits such as // capitalization of the protocol name are automatically fixed up. More // fundamental violations will cause an error to be returned. +// +// Note: this function is called from both the operator and the agent; +// make sure any configuration flags are bound in **both** binaries. func (r *Rule) Sanitize() error { if option.Config.EnableNonDefaultDenyPolicies { // Fill in the default traffic posture of this Rule. @@ -218,13 +221,30 @@ func countNonGeneratedCIDRRules(s CIDRRuleSlice) int { return n } +// countNonGeneratedEndpoints counts the number of EndpointSelector items which are not +// `Generated`, i.e. were directly provided by the user. +// The `Generated` field is currently only set by the `ToServices` +// implementation, which extracts service endpoints and translates them as +// ToEndpoints rules before the CNP is passed to the policy repository. +// Therefore, we want to allow the combination of ToEndpoints and ToServices +// rules, if (and only if) the ToEndpoints only contains `Generated` entries. 
+func countNonGeneratedEndpoints(s []EndpointSelector) int { + n := 0 + for _, c := range s { + if !c.Generated { + n++ + } + } + return n +} + func (e *EgressRule) sanitize(hostPolicy bool) error { var retErr error l3Members := map[string]int{ "ToCIDR": len(e.ToCIDR), "ToCIDRSet": countNonGeneratedCIDRRules(e.ToCIDRSet), - "ToEndpoints": len(e.ToEndpoints), + "ToEndpoints": countNonGeneratedEndpoints(e.ToEndpoints), "ToEntities": len(e.ToEntities), "ToServices": len(e.ToServices), "ToFQDNs": len(e.ToFQDNs), @@ -236,7 +256,7 @@ func (e *EgressRule) sanitize(hostPolicy bool) error { "ToCIDRSet": true, "ToEndpoints": true, "ToEntities": true, - "ToServices": false, // see https://github.com/cilium/cilium/issues/20067 + "ToServices": true, "ToFQDNs": true, "ToGroups": true, "ToNodes": true, diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/selector.go b/vendor/github.com/cilium/cilium/pkg/policy/api/selector.go index c23aa0d9c2..7dc6c987fb 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/selector.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/selector.go @@ -34,6 +34,10 @@ type EndpointSelector struct { // EndpointSelectors are created via `NewESFromMatchRequirements`. It is // immutable after its creation. cachedLabelSelectorString string `json:"-"` + + // Generated indicates whether the rule was generated based on other rules + // or provided by user + Generated bool `json:"-"` } // LabelSelectorString returns a user-friendly string representation of diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepequal.go index ced0d973b7..557a0ba65a 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepequal.go @@ -414,6 +414,9 @@ func (in *EndpointSelector) DeepEqual(other *EndpointSelector) bool { if in.cachedLabelSelectorString != other.cachedLabelSelectorString { return false } + if in.Generated != other.Generated { + return false + } return true } @@ -1377,6 +1380,9 @@ func (in *ServiceSelector) DeepEqual(other *ServiceSelector) bool { if in.cachedLabelSelectorString != other.cachedLabelSelectorString { return false } + if in.Generated != other.Generated { + return false + } return true } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/cidr.go b/vendor/github.com/cilium/cilium/pkg/policy/cidr.go index 33f14a9cd9..e854e677a0 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/cidr.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/cidr.go @@ -8,6 +8,8 @@ import ( "github.com/cilium/cilium/pkg/ip" "github.com/cilium/cilium/pkg/policy/api" + + "k8s.io/apimachinery/pkg/util/sets" ) // getPrefixesFromCIDR fetches all CIDRs referred to by the specified slice @@ -18,58 +20,75 @@ func getPrefixesFromCIDR(cidrs api.CIDRSlice) []netip.Prefix { } // GetPrefixesFromCIDRSet fetches all CIDRs referred to by the specified slice -// and returns them as regular golang CIDR objects. +// and returns them as regular golang CIDR objects. Includes CIDRs listed in +// ExceptCIDRs fields. // // Assumes that validation already occurred on 'rules'. 
func GetPrefixesFromCIDRSet(rules api.CIDRRuleSlice) []netip.Prefix { - cidrs := api.ComputeResultantCIDRSet(rules) - return getPrefixesFromCIDR(cidrs) + out := make([]netip.Prefix, 0, len(rules)) + for _, rule := range rules { + if rule.Cidr != "" { + pfx, err := netip.ParsePrefix(string(rule.Cidr)) + if err == nil { + // must parse, was already validated. + out = append(out, pfx.Masked()) + } + } + for _, except := range rule.ExceptCIDRs { + pfx, err := netip.ParsePrefix(string(except)) + if err == nil { + out = append(out, pfx.Masked()) + } + } + } + + return out } // GetCIDRPrefixes runs through the specified 'rules' to find every reference // to a CIDR in the rules, and returns a slice containing all of these CIDRs. -// Multiple rules referring to the same CIDR will result in multiple copies of -// the CIDR in the returned slice. +// +// Includes prefixes referenced solely by "ExceptCIDRs" entries. // // Assumes that validation already occurred on 'rules'. func GetCIDRPrefixes(rules api.Rules) []netip.Prefix { if len(rules) == 0 { return nil } - res := make([]netip.Prefix, 0, 32) + res := make(sets.Set[netip.Prefix], 32) for _, r := range rules { for _, ir := range r.Ingress { if len(ir.FromCIDR) > 0 { - res = append(res, getPrefixesFromCIDR(ir.FromCIDR)...) + res.Insert(getPrefixesFromCIDR(ir.FromCIDR)...) } if len(ir.FromCIDRSet) > 0 { - res = append(res, GetPrefixesFromCIDRSet(ir.FromCIDRSet)...) + res.Insert(GetPrefixesFromCIDRSet(ir.FromCIDRSet)...) } } for _, ir := range r.IngressDeny { if len(ir.FromCIDR) > 0 { - res = append(res, getPrefixesFromCIDR(ir.FromCIDR)...) + res.Insert(getPrefixesFromCIDR(ir.FromCIDR)...) } if len(ir.FromCIDRSet) > 0 { - res = append(res, GetPrefixesFromCIDRSet(ir.FromCIDRSet)...) + res.Insert(GetPrefixesFromCIDRSet(ir.FromCIDRSet)...) } } for _, er := range r.Egress { if len(er.ToCIDR) > 0 { - res = append(res, getPrefixesFromCIDR(er.ToCIDR)...) + res.Insert(getPrefixesFromCIDR(er.ToCIDR)...) } if len(er.ToCIDRSet) > 0 { - res = append(res, GetPrefixesFromCIDRSet(er.ToCIDRSet)...) + res.Insert(GetPrefixesFromCIDRSet(er.ToCIDRSet)...) } } for _, er := range r.EgressDeny { if len(er.ToCIDR) > 0 { - res = append(res, getPrefixesFromCIDR(er.ToCIDR)...) + res.Insert(getPrefixesFromCIDR(er.ToCIDR)...) } if len(er.ToCIDRSet) > 0 { - res = append(res, GetPrefixesFromCIDRSet(er.ToCIDRSet)...) + res.Insert(GetPrefixesFromCIDRSet(er.ToCIDRSet)...) } } } - return res + return res.UnsortedList() } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/distillery.go b/vendor/github.com/cilium/cilium/pkg/policy/distillery.go index ce55f54e6f..61b5822393 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/distillery.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/distillery.go @@ -5,6 +5,7 @@ package policy import ( "fmt" + "iter" "sync/atomic" "github.com/cilium/cilium/pkg/container/versioned" @@ -17,9 +18,13 @@ import ( // the policy repository and ready to be distilled against a set of identities // to compute datapath-level policy configuration. type SelectorPolicy interface { + // CreateRedirects is used to ensure the endpoint has created all the needed redirects + // before a new EndpointPolicy is created. + RedirectFilters() iter.Seq2[*L4Filter, *PerSelectorPolicy] + // Consume returns the policy in terms of connectivity to peer // Identities. - Consume(owner PolicyOwner) *EndpointPolicy + Consume(owner PolicyOwner, redirects map[string]uint16) *EndpointPolicy } // PolicyCache represents a cache of resolved policies for identities. 
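The GetPrefixesFromCIDRSet and GetCIDRPrefixes changes above stop expanding exception ranges and instead collect every referenced prefix, including the ExceptCIDRs, then de-duplicate the result. A minimal, stdlib-only sketch of that collection step follows; the cidrRule type and prefixesFromRules helper are hypothetical stand-ins for illustration, not the cilium policy/api types, and a plain map plays the role of the sets.Set used in the real change.

```go
package main

import (
	"fmt"
	"net/netip"
)

// cidrRule is a hypothetical stand-in for a CIDR rule with exceptions.
type cidrRule struct {
	CIDR    string
	Excepts []string
}

// prefixesFromRules collects the masked prefixes referenced by the rules,
// including exception CIDRs, de-duplicating repeats with a set-like map.
func prefixesFromRules(rules []cidrRule) []netip.Prefix {
	seen := make(map[netip.Prefix]struct{})
	for _, r := range rules {
		for _, s := range append([]string{r.CIDR}, r.Excepts...) {
			if s == "" {
				continue
			}
			pfx, err := netip.ParsePrefix(s)
			if err != nil {
				continue // assume validation already rejected malformed CIDRs
			}
			seen[pfx.Masked()] = struct{}{}
		}
	}
	out := make([]netip.Prefix, 0, len(seen))
	for p := range seen {
		out = append(out, p)
	}
	return out
}

func main() {
	rules := []cidrRule{
		{CIDR: "1.1.1.0/24", Excepts: []string{"1.1.1.1/32"}},
		{CIDR: "1.1.1.0/24"}, // a repeated prefix collapses to one entry
	}
	fmt.Println(prefixesFromRules(rules)) // order unspecified, like UnsortedList
}
```

Keeping the exception prefixes in the returned set fits the carve-out described in the cidr.go comment: the longer prefix must reach the ipcache with its own identity so the DoesNotExist requirement can exclude it.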
@@ -35,7 +40,7 @@ type PolicyCache struct { } // NewPolicyCache creates a new cache of SelectorPolicy. -func NewPolicyCache(repo *Repository, idmgr *identitymanager.IdentityManager) *PolicyCache { +func NewPolicyCache(repo *Repository, idmgr identitymanager.IDManager) *PolicyCache { cache := &PolicyCache{ repo: repo, policies: make(map[identityPkg.NumericIdentity]*cachedSelectorPolicy), @@ -229,10 +234,14 @@ func (cip *cachedSelectorPolicy) setPolicy(policy *selectorPolicy) { // // This denotes that a particular endpoint is 'consuming' the policy from the // selector policy cache. -func (cip *cachedSelectorPolicy) Consume(owner PolicyOwner) *EndpointPolicy { +func (cip *cachedSelectorPolicy) Consume(owner PolicyOwner, redirects map[string]uint16) *EndpointPolicy { // TODO: This currently computes the EndpointPolicy from SelectorPolicy // on-demand, however in future the cip is intended to cache the // EndpointPolicy for this Identity and emit datapath deltas instead. isHost := cip.identity.ID == identityPkg.ReservedIdentityHost - return cip.getPolicy().DistillPolicy(owner, isHost) + return cip.getPolicy().DistillPolicy(owner, redirects, isHost) +} + +func (cip *cachedSelectorPolicy) RedirectFilters() iter.Seq2[*L4Filter, *PerSelectorPolicy] { + return cip.getPolicy().RedirectFilters() } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/l4.go b/vendor/github.com/cilium/cilium/pkg/policy/l4.go index b47c67fcb9..041bed8a65 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/l4.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/l4.go @@ -12,7 +12,6 @@ import ( "strconv" "strings" "sync/atomic" - "testing" cilium "github.com/cilium/proxy/go/cilium/api" "github.com/sirupsen/logrus" @@ -60,8 +59,9 @@ func (l4rule *PerSelectorPolicy) covers(l3l4rule *PerSelectorPolicy) bool { if l3l4IsRedirect && !l4OnlyIsRedirect { // Can not skip if l3l4-rule is redirect while l4-only is not return false - } else if l3l4IsRedirect && l4OnlyIsRedirect && l3l4rule.Listener != l4rule.Listener { - // L3l4 rule has a different listener, it can not be skipped + } else if l3l4IsRedirect && l4OnlyIsRedirect && + (l3l4rule.Listener != l4rule.Listener || l3l4rule.Priority != l4rule.Priority) { + // L3l4 rule has a different listener or priority, it can not be skipped return false } @@ -504,7 +504,7 @@ func (l4 *L4Filter) GetPort() uint16 { } // Equals returns true if two L4Filters are equal -func (l4 *L4Filter) Equals(_ *testing.T, bL4 *L4Filter) bool { +func (l4 *L4Filter) Equals(bL4 *L4Filter) bool { if l4.Port == bL4.Port && l4.EndPort == bL4.EndPort && l4.PortName == bL4.PortName && @@ -529,14 +529,24 @@ func (l4 *L4Filter) Equals(_ *testing.T, bL4 *L4Filter) bool { } // ChangeState allows caller to revert changes made by (multiple) toMapState call(s) +// All fields are maps so we can pass this by value. type ChangeState struct { - Adds Keys // Added or modified keys, if not nil - Deletes Keys // deleted keys, if not nil - Old MapStateMap // Old values of all modified or deleted keys, if not nil + Adds Keys // Added or modified keys, if not nil + Deletes Keys // deleted keys, if not nil + old map[Key]mapStateEntry // Old values of all modified or deleted keys, if not nil +} + +// NewRevertState returns an empty ChangeState suitable for reverting MapState changes. +// The private 'old' field is initialized so that old state can be restored if need be. 
+func NewRevertState() ChangeState { + return ChangeState{ + Adds: make(Keys), + old: make(map[Key]mapStateEntry), + } } func (c *ChangeState) Empty() bool { - return len(c.Adds)+len(c.Deletes)+len(c.Old) == 0 + return len(c.Adds)+len(c.Deletes)+len(c.old) == 0 } // toMapState converts a single filter into a MapState entries added to 'p.PolicyMapState'. @@ -548,7 +558,7 @@ func (c *ChangeState) Empty() bool { // 'redirects' is the map of currently realized redirects, it is used to find the proxy port for any redirects. // p.SelectorCache is used as Identities interface during this call, which only has GetPrefix() that // needs no lock. -func (l4 *L4Filter) toMapState(p *EndpointPolicy, features policyFeatures, redirects map[string]uint16, changes ChangeState) { +func (l4 *L4Filter) toMapState(p *EndpointPolicy, features policyFeatures, changes ChangeState) { port := l4.Port proto := l4.U8Proto @@ -588,9 +598,10 @@ func (l4 *L4Filter) toMapState(p *EndpointPolicy, features policyFeatures, redir wildcardRule = l4.PerSelectorPolicies[l4.wildcard] } + isL4Wildcard := (l4.Port != 0 || l4.PortName != "") && l4.wildcard != nil for cs, currentRule := range l4.PerSelectorPolicies { // have wildcard and this is an L3L4 key? - isL3L4withWildcardPresent := (l4.Port != 0 || l4.PortName != "") && l4.wildcard != nil && cs != l4.wildcard + isL3L4withWildcardPresent := isL4Wildcard && cs != l4.wildcard if isL3L4withWildcardPresent && wildcardRule.covers(currentRule) { logger.WithField(logfields.EndpointSelector, cs).Debug("ToMapState: Skipping L3/L4 key due to existing L4-only key") @@ -600,6 +611,7 @@ func (l4 *L4Filter) toMapState(p *EndpointPolicy, features policyFeatures, redir isDenyRule := currentRule != nil && currentRule.IsDeny isRedirect := currentRule.IsRedirect() listener := currentRule.GetListener() + priority := currentRule.GetPriority() if !isDenyRule && isL3L4withWildcardPresent && !isRedirect { // Inherit the redirect status from the wildcard rule. // This is now needed as 'covers()' can pass non-redirect L3L4 rules @@ -608,27 +620,27 @@ func (l4 *L4Filter) toMapState(p *EndpointPolicy, features policyFeatures, redir // L4-only rule. isRedirect = wildcardRule.IsRedirect() listener = wildcardRule.GetListener() + priority = wildcardRule.GetPriority() } hasAuth, authType := currentRule.GetAuthType() var proxyPort uint16 if isRedirect { - var exists bool - proxyID := ProxyID(uint16(p.PolicyOwner.GetID()), l4.Ingress, string(l4.Protocol), port, listener) - proxyPort, exists = redirects[proxyID] - if !exists { + var err error + proxyPort, err = p.LookupRedirectPort(l4.Ingress, string(l4.Protocol), port, listener) + if err != nil { // Skip unrealized redirects; this happens routineously just // before new redirects are realized. Once created, we are called // again. 
- logger.WithField(logfields.EndpointSelector, cs).Debugf("Skipping unrealized redirect %s (%v)", proxyID, redirects) + logger.WithError(err).WithField(logfields.EndpointSelector, cs).Debugf("Skipping unrealized redirect") continue } } - entry := NewMapStateEntry(cs, l4.RuleOrigin[cs], proxyPort, currentRule.GetListener(), currentRule.GetPriority(), isDenyRule, hasAuth, authType) + entry := newMapStateEntry(cs, l4.RuleOrigin[cs], proxyPort, priority, isDenyRule, hasAuth, authType) if cs.IsWildcard() { for _, keyToAdd := range keysToAdd { keyToAdd.Identity = 0 - p.policyMapState.denyPreferredInsertWithChanges(keyToAdd, entry, features, changes) + p.policyMapState.insertWithChanges(keyToAdd, entry, features, changes) if port == 0 { // Allow-all @@ -660,15 +672,15 @@ func (l4 *L4Filter) toMapState(p *EndpointPolicy, features policyFeatures, redir for _, id := range idents { for _, keyToAdd := range keysToAdd { keyToAdd.Identity = id - p.policyMapState.denyPreferredInsertWithChanges(keyToAdd, entry, features, changes) + p.policyMapState.insertWithChanges(keyToAdd, entry, features, changes) // If Cilium is in dual-stack mode then the "World" identity // needs to be split into two identities to represent World // IPv6 and IPv4 traffic distinctly from one another. if id == identity.ReservedIdentityWorld && option.Config.IsDualStack() { keyToAdd.Identity = identity.ReservedIdentityWorldIPv4 - p.policyMapState.denyPreferredInsertWithChanges(keyToAdd, entry, features, changes) + p.policyMapState.insertWithChanges(keyToAdd, entry, features, changes) keyToAdd.Identity = identity.ReservedIdentityWorldIPv6 - p.policyMapState.denyPreferredInsertWithChanges(keyToAdd, entry, features, changes) + p.policyMapState.insertWithChanges(keyToAdd, entry, features, changes) } } } @@ -677,7 +689,7 @@ func (l4 *L4Filter) toMapState(p *EndpointPolicy, features policyFeatures, redir log.WithFields(logrus.Fields{ logfields.PolicyKeysAdded: changes.Adds, logfields.PolicyKeysDeleted: changes.Deletes, - logfields.PolicyEntriesOld: changes.Old, + logfields.PolicyEntriesOld: changes.old, }).Debug("ToMapChange changes") } } @@ -1106,7 +1118,7 @@ func addL4Filter(policyCtx PolicyContext, ctx *SearchContext, resMap L4PolicyMap, p api.PortProtocol, proto api.L4Proto, filterToMerge *L4Filter, - ruleLabels labels.LabelArray) error { +) error { existingFilter := resMap.ExactLookup(p.Port, uint16(p.EndPort), string(proto)) if existingFilter == nil { @@ -1144,16 +1156,17 @@ type L4PolicyMap interface { IngressCoversContext(ctx *SearchContext) api.Decision EgressCoversContext(ctx *SearchContext) api.Decision ForEach(func(l4 *L4Filter) bool) - Equals(t *testing.T, bMap L4PolicyMap) bool - Diff(t *testing.T, expectedMap L4PolicyMap) string + TestingOnlyEquals(bMap L4PolicyMap) bool + TestingOnlyDiff(expectedMap L4PolicyMap) string Len() int } // NewL4PolicyMap creates an new L4PolicMap. func NewL4PolicyMap() L4PolicyMap { return &l4PolicyMap{ - namedPortMap: make(map[string]*L4Filter), - rangePortMap: bitlpm.NewUintTrie[uint32, map[portProtoKey]*L4Filter](), + namedPortMap: make(map[string]*L4Filter), + rangePortMap: make(map[portProtoKey]*L4Filter), + rangePortIndex: bitlpm.NewUintTrie[uint32, map[portProtoKey]struct{}](), } } @@ -1161,8 +1174,9 @@ func NewL4PolicyMap() L4PolicyMap { // set of values. The initMap argument does not support port ranges. 
func NewL4PolicyMapWithValues(initMap map[string]*L4Filter) L4PolicyMap { l4M := &l4PolicyMap{ - namedPortMap: make(map[string]*L4Filter), - rangePortMap: bitlpm.NewUintTrie[uint32, map[portProtoKey]*L4Filter](), + namedPortMap: make(map[string]*L4Filter), + rangePortMap: make(map[portProtoKey]*L4Filter), + rangePortIndex: bitlpm.NewUintTrie[uint32, map[portProtoKey]struct{}](), } for k, v := range initMap { portProtoSlice := strings.Split(k, "/") @@ -1186,16 +1200,13 @@ type l4PolicyMap struct { // level, because they can only be resolved at the endpoint/identity // level. Named ports cannot have ranges. namedPortMap map[string]*L4Filter - // rangePortMap has to keep a map of L4Filters rather than - // a single L4Filter reference so that the l4PolicyMap does - // not merge L4Filter that are not the same port range, but - // share an overlapping range in the trie. - rangePortMap *bitlpm.UintTrie[uint32, map[portProtoKey]*L4Filter] - // rangeMapLen counts the number of unique L4Filters in - // the rangePortMap. It must be tracked separately from - // rangePortMap as L4Filters are split up when - // the port range length is not a power of two. - rangeMapLen int + // rangePortMap is a map of all L4Filters indexed by their port- + // protocol. + rangePortMap map[portProtoKey]*L4Filter + // rangePortIndex is an index of all L4Filters so that + // L4Filters that have overlapping port ranges can be looked up + // by with a single port. + rangePortIndex *bitlpm.UintTrie[uint32, map[portProtoKey]struct{}] } func parsePortProtocol(port, protocol string) (uint16, uint8) { @@ -1226,22 +1237,21 @@ func (l4M *l4PolicyMap) Upsert(port string, endPort uint16, protocol string, l4 endPort: endPort, proto: protoU, } - var upsertHappened bool - for _, mp := range PortRangeToMaskedPorts(portU, endPort) { - k := makePolicyMapKey(mp.port, mp.mask, protoU) - prefix := 32 - uint(bits.TrailingZeros16(mp.mask)) - portProtoMap, ok := l4M.rangePortMap.ExactLookup(prefix, k) - if !ok { - portProtoMap = make(map[portProtoKey]*L4Filter) - l4M.rangePortMap.Upsert(prefix, k, portProtoMap) - } - if !upsertHappened { - if _, ok := portProtoMap[ppK]; !ok { - l4M.rangeMapLen += 1 - upsertHappened = true + _, indexExists := l4M.rangePortMap[ppK] + l4M.rangePortMap[ppK] = l4 + // We do not need to reindex a key that already exists, + // even if the filter changed. + if !indexExists { + for _, mp := range PortRangeToMaskedPorts(portU, endPort) { + k := makePolicyMapKey(mp.port, mp.mask, protoU) + prefix := 32 - uint(bits.TrailingZeros16(mp.mask)) + portProtoSet, ok := l4M.rangePortIndex.ExactLookup(prefix, k) + if !ok { + portProtoSet = make(map[portProtoKey]struct{}) + l4M.rangePortIndex.Upsert(prefix, k, portProtoSet) } + portProtoSet[ppK] = struct{}{} } - portProtoMap[ppK] = l4 } } @@ -1258,23 +1268,21 @@ func (l4M *l4PolicyMap) Delete(port string, endPort uint16, protocol string) { endPort: endPort, proto: protoU, } - var deleteHappened bool - for _, mp := range PortRangeToMaskedPorts(portU, endPort) { - k := makePolicyMapKey(mp.port, mp.mask, protoU) - prefix := 32 - uint(bits.TrailingZeros16(mp.mask)) - portProtoMap, ok := l4M.rangePortMap.ExactLookup(prefix, k) - if !ok { - return - } - if _, ok := portProtoMap[ppK]; ok { - delete(portProtoMap, ppK) - if !deleteHappened { - l4M.rangeMapLen -= 1 - deleteHappened = true + _, indexExists := l4M.rangePortMap[ppK] + delete(l4M.rangePortMap, ppK) + // Only delete the index if the key exists. 
+ if indexExists { + for _, mp := range PortRangeToMaskedPorts(portU, endPort) { + k := makePolicyMapKey(mp.port, mp.mask, protoU) + prefix := 32 - uint(bits.TrailingZeros16(mp.mask)) + portProtoSet, ok := l4M.rangePortIndex.ExactLookup(prefix, k) + if !ok { + return + } + delete(portProtoSet, ppK) + if len(portProtoSet) == 0 { + l4M.rangePortIndex.Delete(prefix, k) } - } - if len(portProtoMap) == 0 { - l4M.rangePortMap.Delete(prefix, k) } } } @@ -1291,18 +1299,7 @@ func (l4M *l4PolicyMap) ExactLookup(port string, endPort uint16, protocol string endPort: endPort, proto: protoU, } - for _, mp := range PortRangeToMaskedPorts(portU, endPort) { - k := makePolicyMapKey(mp.port, mp.mask, protoU) - prefix := 32 - uint(bits.TrailingZeros16(mp.mask)) - portProtoMap, ok := l4M.rangePortMap.ExactLookup(prefix, k) - if !ok { - return nil - } - if l4, ok := portProtoMap[ppK]; ok { - return l4 - } - } - return nil + return l4M.rangePortMap[ppK] } // MatchesLabels checks if a given port, protocol, and labels matches @@ -1318,13 +1315,16 @@ func (l4M *l4PolicyMap) MatchesLabels(port, protocol string, labels labels.Label portU, protoU := parsePortProtocol(port, protocol) l4PortProtoKeys := make(map[portProtoKey]struct{}) - l4M.rangePortMap.Ancestors(32, makePolicyMapKey(portU, 0xffff, protoU), - func(_ uint, _ uint32, portProtoMap map[portProtoKey]*L4Filter) bool { - for k, v := range portProtoMap { - if _, ok := l4PortProtoKeys[k]; !ok { - match, isDeny = v.matchesLabels(labels) - if isDeny { - return false + l4M.rangePortIndex.Ancestors(32, makePolicyMapKey(portU, 0xffff, protoU), + func(_ uint, _ uint32, portProtoSet map[portProtoKey]struct{}) bool { + for k := range portProtoSet { + v, ok := l4M.rangePortMap[k] + if ok { + if _, ok := l4PortProtoKeys[k]; !ok { + match, isDeny = v.matchesLabels(labels) + if isDeny { + return false + } } } } @@ -1336,23 +1336,19 @@ func (l4M *l4PolicyMap) MatchesLabels(port, protocol string, labels labels.Label // ForEach iterates over all L4Filters in the l4PolicyMap. func (l4M *l4PolicyMap) ForEach(fn func(l4 *L4Filter) bool) { for _, f := range l4M.namedPortMap { - fn(f) + if !fn(f) { + return + } } - l4PortProtoKeys := make(map[portProtoKey]struct{}) - l4M.rangePortMap.ForEach(func(prefix uint, key uint32, portPortoMap map[portProtoKey]*L4Filter) bool { - for k, v := range portPortoMap { - // We check for redundant L4Filters, because we split them apart in the index. - if _, ok := l4PortProtoKeys[k]; !ok { - fn(v) - l4PortProtoKeys[k] = struct{}{} - } + for _, v := range l4M.rangePortMap { + if !fn(v) { + return } - return true - }) + } } // Equals returns true if both L4PolicyMaps are equal. -func (l4M *l4PolicyMap) Equals(_ *testing.T, bMap L4PolicyMap) bool { +func (l4M *l4PolicyMap) TestingOnlyEquals(bMap L4PolicyMap) bool { if l4M.Len() != bMap.Len() { return false } @@ -1363,14 +1359,14 @@ func (l4M *l4PolicyMap) Equals(_ *testing.T, bMap L4PolicyMap) bool { port = fmt.Sprintf("%d", l4.Port) } l4B := bMap.ExactLookup(port, l4.EndPort, string(l4.Protocol)) - equal = l4.Equals(nil, l4B) + equal = l4.Equals(l4B) return equal }) return equal } // Diff returns the difference between to L4PolicyMaps. 
-func (l4M *l4PolicyMap) Diff(_ *testing.T, expected L4PolicyMap) (res string) { +func (l4M *l4PolicyMap) TestingOnlyDiff(expected L4PolicyMap) (res string) { res += "Missing (-), Unexpected (+):\n" expected.ForEach(func(eV *L4Filter) bool { port := eV.PortName @@ -1379,7 +1375,7 @@ func (l4M *l4PolicyMap) Diff(_ *testing.T, expected L4PolicyMap) (res string) { } oV := l4M.ExactLookup(port, eV.Port, string(eV.Protocol)) if oV != nil { - if !eV.Equals(nil, oV) { + if !eV.Equals(oV) { res += "- " + eV.String() + "\n" res += "+ " + oV.String() + "\n" } @@ -1407,7 +1403,7 @@ func (l4M *l4PolicyMap) Len() int { if l4M == nil { return 0 } - return len(l4M.namedPortMap) + l4M.rangeMapLen + return len(l4M.namedPortMap) + len(l4M.rangePortMap) } type policyFeatures uint8 @@ -1631,14 +1627,18 @@ func (l4Policy *L4Policy) AccumulateMapChanges(l4 *L4Filter, cs CachedSelector, var proxyPort uint16 if redirect { var err error - proxyPort, err = epPolicy.PolicyOwner.LookupRedirectPort(l4.Ingress, string(l4.Protocol), port, listener) + proxyPort, err = epPolicy.LookupRedirectPort(l4.Ingress, string(l4.Protocol), port, listener) if err != nil { - // This happens for new redirects that have not been realized - // yet. The accumulated changes should only be consumed after new - // redirects have been realized. ConsumeMapChanges then maps this - // invalid value to the real redirect port before the entry is - // visible to the endpoint package. - proxyPort = unrealizedRedirectPort + log.WithFields(logrus.Fields{ + logfields.EndpointSelector: cs, + logfields.Port: port, + logfields.Protocol: proto, + logfields.TrafficDirection: direction, + logfields.IsRedirect: redirect, + logfields.Listener: listener, + logfields.ListenerPriority: priority, + }).Warn("AccumulateMapChanges: Missing redirect.") + continue } } var keysToAdd []Key @@ -1646,7 +1646,7 @@ func (l4Policy *L4Policy) AccumulateMapChanges(l4 *L4Filter, cs CachedSelector, keysToAdd = append(keysToAdd, KeyForDirection(direction).WithPortProtoPrefix(proto, mp.port, uint8(bits.LeadingZeros16(^mp.mask)))) } - value := NewMapStateEntry(cs, derivedFrom, proxyPort, listener, priority, isDeny, hasAuth, authType) + value := newMapStateEntry(cs, derivedFrom, proxyPort, priority, isDeny, hasAuth, authType) if option.Config.Debug { authString := "default" @@ -1666,7 +1666,7 @@ func (l4Policy *L4Policy) AccumulateMapChanges(l4 *L4Filter, cs CachedSelector, logfields.ListenerPriority: priority, }).Debug("AccumulateMapChanges") } - epPolicy.policyMapChanges.AccumulateMapChanges(cs, adds, deletes, keysToAdd, value) + epPolicy.policyMapChanges.AccumulateMapChanges(adds, deletes, keysToAdd, value) } } @@ -1674,6 +1674,7 @@ func (l4Policy *L4Policy) AccumulateMapChanges(l4 *L4Filter, cs CachedSelector, func (l4Policy *L4Policy) SyncMapChanges(l4 *L4Filter, txn *versioned.Tx) { // SelectorCache may not be called into while holding this lock! 
l4Policy.mutex.RLock() + for epPolicy := range l4Policy.users { epPolicy.policyMapChanges.SyncMapChanges(txn) } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/mapstate.go b/vendor/github.com/cilium/cilium/pkg/policy/mapstate.go index f9ac6d3d90..0fce54077d 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/mapstate.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/mapstate.go @@ -5,6 +5,7 @@ package policy import ( "fmt" + "iter" "maps" "slices" "strconv" @@ -13,6 +14,7 @@ import ( "github.com/sirupsen/logrus" "github.com/cilium/cilium/pkg/container/bitlpm" + "github.com/cilium/cilium/pkg/container/set" "github.com/cilium/cilium/pkg/container/versioned" "github.com/cilium/cilium/pkg/identity" "github.com/cilium/cilium/pkg/labels" @@ -31,7 +33,10 @@ import ( // lest ye find yourself with hundreds of unnecessary imports. type Key = policyTypes.Key type Keys = policyTypes.Keys +type MapStateOwner = any // Key or CachedSelector +// Map type for external use. Internally we have more detail in private 'mapSteteEntry' type, +// as well as more extensive indexing via tries. type MapStateMap map[Key]MapStateEntry func EgressKey() policyTypes.Key { @@ -62,44 +67,14 @@ const ( LabelAllowLocalHostIngress = "allow-localhost-ingress" LabelAllowAnyIngress = "allow-any-ingress" LabelAllowAnyEgress = "allow-any-egress" - LabelVisibilityAnnotation = "visibility-annotation" - - // Using largest possible port value since it has the lowest priority - unrealizedRedirectPort = uint16(65535) ) -// MapState is a map interface for policy maps -type MapState interface { - Get(Key) (MapStateEntry, bool) - - // ForEach allows iteration over the MapStateEntries. It returns true if - // the iteration was not stopped early by the callback. - ForEach(func(Key, MapStateEntry) (cont bool)) (complete bool) - GetIdentities(*logrus.Logger) ([]int64, []int64) - GetDenyIdentities(*logrus.Logger) ([]int64, []int64) - Len() int - - // private accessors - deniesL4(policyOwner PolicyOwner, l4 *L4Filter) bool - - // - // modifiers are private - // - delete(Key) - insert(Key, MapStateEntry) - revertChanges(ChangeState) - - addVisibilityKeys(PolicyOwner, uint16, *VisibilityMetadata, ChangeState) - allowAllIdentities(ingress, egress bool) - determineAllowLocalhostIngress() - denyPreferredInsertWithChanges(newKey Key, newEntry MapStateEntry, features policyFeatures, changes ChangeState) - deleteKeyWithChanges(key Key, owner MapStateOwner, changes ChangeState) - - // For testing from other packages only - Equals(MapState) bool - Diff(expected MapState) string - WithState(initMap MapStateMap) MapState -} +var ( + LabelsAllowAnyIngress = labels.LabelArrayList{labels.LabelArray{ + labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyIngress, labels.LabelSourceReserved)}} + LabelsAllowAnyEgress = labels.LabelArrayList{labels.LabelArray{ + labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved)}} +) // mapState is a state of a policy map. type mapState struct { @@ -121,12 +96,12 @@ type mapState struct { // intermediate node in the Trie with its own sub node associated with // TrafficDirection, Protocol, and Port. When identity is not indexed // then one policy will map to one key-prefix with a builtin map type -// that associates each identity with a MapStateEntry. This strategy +// that associates each identity with a mapStateEntry. This strategy // greatly enhances the usefuleness of the Trie and improves lookup, // deletion, and insertion times. 
type mapStateMap struct { // entries is the map containing the MapStateEntries - entries MapStateMap + entries map[Key]mapStateEntry // trie is a Trie that indexes policy Keys without their identity // and stores the identities in an associated builtin map. trie bitlpm.Trie[bitlpm.Key[policyTypes.LPMKey], IDSet] @@ -134,12 +109,16 @@ type mapStateMap struct { type IDSet map[identity.NumericIdentity]struct{} -func (msm *mapStateMap) Lookup(k Key) (MapStateEntry, bool) { +func (msm *mapStateMap) Empty() bool { + return len(msm.entries) == 0 +} + +func (msm *mapStateMap) Lookup(k Key) (mapStateEntry, bool) { v, ok := msm.entries[k] return v, ok } -func (msm *mapStateMap) upsert(k Key, e MapStateEntry) { +func (msm *mapStateMap) upsert(k Key, e mapStateEntry) { _, exists := msm.entries[k] // upsert entry @@ -177,6 +156,15 @@ func (msm *mapStateMap) delete(k Key) { } func (msm *mapStateMap) ForEach(f func(Key, MapStateEntry) bool) bool { + for k, e := range msm.entries { + if !f(k, e.MapStateEntry) { + return false + } + } + return true +} + +func (msm *mapStateMap) forEach(f func(Key, mapStateEntry) bool) bool { for k, e := range msm.entries { if !f(k, e) { return false @@ -185,7 +173,7 @@ func (msm *mapStateMap) ForEach(f func(Key, MapStateEntry) bool) bool { return true } -func (msm *mapStateMap) forKey(k Key, f func(Key, MapStateEntry) bool) bool { +func (msm *mapStateMap) forKey(k Key, f func(Key, mapStateEntry) bool) bool { e, ok := msm.entries[k] if ok { return f(k, e) @@ -197,226 +185,214 @@ func (msm *mapStateMap) forKey(k Key, f func(Key, MapStateEntry) bool) bool { return true } -// ForEachNarrowerKeyWithBroaderID iterates over narrower port/proto's and broader IDs in the trie. -// Equal port/protos or identities are not included. -func (msm *mapStateMap) ForEachNarrowerKeyWithBroaderID(key Key, f func(Key, MapStateEntry) bool) { - msm.trie.Descendants(key.PrefixLength(), key, func(_ uint, lpmKey bitlpm.Key[policyTypes.LPMKey], idSet IDSet) bool { - // k is the key from trie with 0'ed ID - k := Key{ - LPMKey: lpmKey.Value(), - } - - // Descendants iterates over equal port/proto, caller expects to see only narrower keys so skip it - if k.PortProtoIsEqual(key) { - return true - } - - // ANY identities are ancestors of all - // identities, visit them first, but not if key is also ANY - if key.Identity != 0 { - if _, exists := idSet[0]; exists { - k.Identity = 0 - if !msm.forKey(k, f) { - return false - } - } +// forDifferentKeys calls 'f' for each Key 'k' with identities in 'idSet', if different from 'key'. +func (msm *mapStateMap) forDifferentKeys(key, k Key, idSet IDSet, f func(Key, mapStateEntry) bool) bool { + for id := range idSet { + k.Identity = id + if key != k && !msm.forKey(k, f) { + return false } - return true - }) + } + return true } -// ForEachBroaderOrEqualKey iterates over broader or equal keys in the trie. -func (msm *mapStateMap) ForEachBroaderOrEqualKey(key Key, f func(Key, MapStateEntry) bool) { - msm.trie.Ancestors(key.PrefixLength(), key, func(_ uint, lpmKey bitlpm.Key[policyTypes.LPMKey], idSet IDSet) bool { - // k is the key from trie with 0'ed ID - k := Key{ - LPMKey: lpmKey.Value(), - } - - // ANY identity is an ancestor of all identities, visit them first - if _, exists := idSet[0]; exists { - k.Identity = 0 +// forSpecificIDs calls 'f' for each non-ANY ID in 'idSet' with port/proto from 'k'. 
+func (msm *mapStateMap) forSpecificIDs(k Key, idSet IDSet, f func(Key, mapStateEntry) bool) bool { + for id := range idSet { + if id != 0 { + k.Identity = id if !msm.forKey(k, f) { return false } } + } + return true +} - // Need to visit all keys with the same identity - // ANY identity was already visited above - if key.Identity != 0 { - _, exists := idSet[key.Identity] - if exists { - k.Identity = key.Identity - if !msm.forKey(k, f) { - return false - } - } +// forIDs calls 'f' for each ID in 'idSet' with port/proto from 'k'. +func (msm *mapStateMap) forIDs(k Key, idSet IDSet, f func(Key, mapStateEntry) bool) bool { + for id := range idSet { + k.Identity = id + if !msm.forKey(k, f) { + return false } - return true - }) + } + return true } -func (msm *mapStateMap) forDescendantIDs(keyIdentity identity.NumericIdentity, k Key, idSet IDSet, f func(Key, MapStateEntry) bool) bool { - switch identity.NumericIdentity(keyIdentity) { - case identity.IdentityUnknown: // 0 - // All identities are descendants of ANY - for id := range idSet { - if id != 0 { - k.Identity = id - if !msm.forKey(k, f) { - return false - } - } +// forID calls 'f' for 'k' if 'k.Identity' exists in 'idSet'. +func (msm *mapStateMap) forID(k Key, idSet IDSet, f func(Key, mapStateEntry) bool) bool { + if _, exists := idSet[k.Identity]; exists { + if !msm.forKey(k, f) { + return false } } return true } -// ForEachNarrowerOrEqualKey iterates over narrower or equal keys in the trie. -func (msm *mapStateMap) ForEachNarrowerOrEqualKey(key Key, f func(Key, MapStateEntry) bool) { - msm.trie.Descendants(key.PrefixLength(), key, func(_ uint, lpmKey bitlpm.Key[policyTypes.LPMKey], idSet IDSet) bool { - // k is the key from trie with 0'ed ID - k := Key{ - LPMKey: lpmKey.Value(), - } +// NarrowerKeysWithWildcardID iterates over ANY keys with narrower port/proto's in the trie. +// Equal port/protos are not included. +// New keys with the protocol/port of the iterated keys can be safely added during iteration as this +// operation does not change the trie, but only adds elements to the idSet that is not used after +// yielding. +func (msm *mapStateMap) NarrowerKeysWithWildcardID(key Key) iter.Seq2[Key, mapStateEntry] { + return func(yield func(Key, mapStateEntry) bool) { + iter := msm.trie.DescendantIterator(key.PrefixLength(), key) + for ok, lpmKey, idSet := iter.Next(); ok; ok, lpmKey, idSet = iter.Next() { + k := Key{LPMKey: lpmKey.Value()} - // Need to visit all keys with the same identity - _, exists := idSet[key.Identity] - if exists { - k.Identity = key.Identity - if !msm.forKey(k, f) { - return false + // Visit narrower ANY keys + if !k.PortProtoIsEqual(key) && !msm.forID(k.WithIdentity(0), idSet, yield) { + return } } - - return msm.forDescendantIDs(key.Identity, k, idSet, f) - }) + } } -// ForEachBroaderKeyWithNarrowerID iterates over broader proto/port with narrower identity in the trie. -// Equal port/protos or identities are not included. -func (msm *mapStateMap) ForEachBroaderKeyWithNarrowerID(key Key, f func(Key, MapStateEntry) bool) { - msm.trie.Ancestors(key.PrefixLength(), key, func(_ uint, lpmKey bitlpm.Key[policyTypes.LPMKey], idSet IDSet) bool { - // k is the key from trie with 0'ed ID - k := Key{ - LPMKey: lpmKey.Value(), - } +// BroaderOrEqualKeys iterates over broader or equal (broader or equal port/proto and the same +// or wildcard ID) in the trie. 
+func (msm *mapStateMap) BroaderOrEqualKeys(key Key) iter.Seq2[Key, mapStateEntry] { + return func(yield func(Key, mapStateEntry) bool) { + iter := msm.trie.AncestorIterator(key.PrefixLength(), key) + for ok, lpmKey, idSet := iter.Next(); ok; ok, lpmKey, idSet = iter.Next() { + k := Key{LPMKey: lpmKey.Value()} - // Skip equal PortProto - if k.PortProtoIsEqual(key) { - return true - } + // ANY identity is broader or equal to all identities, visit it first if it exists + if !msm.forID(k.WithIdentity(0), idSet, yield) { + return + } - return msm.forDescendantIDs(key.Identity, k, idSet, f) - }) + // Visit key with the same identity, if it exists. + // ANY identity was already visited above. + if key.Identity != 0 && !msm.forID(k.WithIdentity(key.Identity), idSet, yield) { + return + } + } + } } -// ForEachBroaderOrEqualDatapathKey iterates over broader or equal keys in the trie. -// Visits all keys that datapath would match IF the 'key' was not added to the policy map. -// NOTE that CIDRs are not considered here as datapath does not support LPM matching in security IDs. -func (msm *mapStateMap) ForEachBroaderOrEqualDatapathKey(key Key, f func(Key, MapStateEntry) bool) { - msm.trie.Ancestors(key.PrefixLength(), key, func(_ uint, lpmKey bitlpm.Key[policyTypes.LPMKey], idSet IDSet) bool { - // k is the key from trie with 0'ed ID - k := Key{ - LPMKey: lpmKey.Value(), - } +// NarrowerKeys iterates over narrower keys in the trie. +func (msm *mapStateMap) NarrowerKeys(key Key) iter.Seq2[Key, mapStateEntry] { + return func(yield func(Key, mapStateEntry) bool) { + iter := msm.trie.DescendantIterator(key.PrefixLength(), key) + for ok, lpmKey, idSet := iter.Next(); ok; ok, lpmKey, idSet = iter.Next() { + k := Key{LPMKey: lpmKey.Value()} - // ANY identities are ancestors of all identities, visit them first - if _, exists := idSet[0]; exists { - k.Identity = 0 - if !msm.forKey(k, f) { - return false + // All identities are narrower than ANY identity, visit different keys + if key.Identity == 0 { + if !msm.forDifferentKeys(key, k, idSet, yield) { + return + } + } else { // key has a specific identity + // Need to visit the key with the same identity, if PortProto is different, + // and one exists. + if !k.PortProtoIsEqual(key) && !msm.forID(k.WithIdentity(key.Identity), idSet, yield) { + return + } } } + } +} - // Need to visit all keys with the same identity - // ANY identity was already visited above - if key.Identity != 0 { - _, exists := idSet[key.Identity] - if exists { - k.Identity = key.Identity - if !msm.forKey(k, f) { - return false +// NarrowerOrEqualKeys iterates over narrower or equal keys in the trie. +// Iterated keys can be safely deleted during iteration due to DescendantIterator holding enough +// state that allows iteration to be continued even if the current trie node is removed. +func (msm *mapStateMap) NarrowerOrEqualKeys(key Key) iter.Seq2[Key, mapStateEntry] { + return func(yield func(Key, mapStateEntry) bool) { + iter := msm.trie.DescendantIterator(key.PrefixLength(), key) + for ok, lpmKey, idSet := iter.Next(); ok; ok, lpmKey, idSet = iter.Next() { + k := Key{LPMKey: lpmKey.Value()} + + // All identities are narrower or equal to ANY identity. + if key.Identity == 0 { + if !msm.forIDs(k, idSet, yield) { + return + } + } else { // key has a specific identity + // Need to visit the key with the same identity, if it exists. 
+ if !msm.forID(k.WithIdentity(key.Identity), idSet, yield) { + return } } } - return true - }) + } } -// ForEachNarrowerOrEqualDatapathKey iterates over narrower or equal keys in the trie. -// Visits all keys that datapath matches that would match 'key' if those keys were not in the policy map. -// NOTE that CIDRs are not considered here as datapath does not support LPM matching in security IDs. -func (msm *mapStateMap) ForEachNarrowerOrEqualDatapathKey(key Key, f func(Key, MapStateEntry) bool) { - msm.trie.Descendants(key.PrefixLength(), key, func(_ uint, lpmKey bitlpm.Key[policyTypes.LPMKey], idSet IDSet) bool { - // k is the key from trie with 0'ed ID - k := Key{ - LPMKey: lpmKey.Value(), +// BroaderKeysWithSpecificID iterates over keys with broader proto/port and a specific +// identity in the trie. +// Equal port/protos or identities are not included. +func (msm *mapStateMap) BroaderKeysWithSpecificID(key Key) iter.Seq2[Key, mapStateEntry] { + return func(yield func(Key, mapStateEntry) bool) { + iter := msm.trie.AncestorIterator(key.PrefixLength(), key) + for ok, lpmKey, idSet := iter.Next(); ok; ok, lpmKey, idSet = iter.Next() { + k := Key{LPMKey: lpmKey.Value()} + + // Visit different keys with specific IDs + if !k.PortProtoIsEqual(key) && !msm.forSpecificIDs(k, idSet, yield) { + return + } } + } +} + +// CoveringKeys iterates over broader port/proto entries in the trie in LPM order, +// with most specific match being returned first. +func (msm *mapStateMap) CoveringKeys(key Key) iter.Seq2[Key, mapStateEntry] { + return func(yield func(Key, mapStateEntry) bool) { + iter := msm.trie.AncestorLongestPrefixFirstIterator(key.PrefixLength(), key) + for ok, lpmKey, idSet := iter.Next(); ok; ok, lpmKey, idSet = iter.Next() { + k := Key{LPMKey: lpmKey.Value()} - // All identities are descendants of ANY identity. - if key.Identity == 0 { - for id := range idSet { - k.Identity = id - if !msm.forKey(k, f) { - return false + // Visit key with the same identity, if port/proto is different. + // ANY identity is visited below. + if key.Identity != 0 && !k.PortProtoIsEqual(key) { + if !msm.forID(k.WithIdentity(key.Identity), idSet, yield) { + return } } - } - // Need to visit all keys with the same identity. - // ANY identity was already visited above. - if key.Identity != 0 { - _, exists := idSet[key.Identity] - if exists { - k.Identity = key.Identity - if !msm.forKey(k, f) { - return false + // ANY identity covers all non-ANY identities, visit them second. + // Keys with ANY identity visit ANY keys only if port/proto is different. + if key.Identity != 0 || !k.PortProtoIsEqual(key) { + if !msm.forID(k.WithIdentity(0), idSet, yield) { + return } } } - return true - }) + } } -// ForEachKeyWithBroaderOrEqualPortProto iterates over broader or equal port/proto entries in the trie. -func (msm *mapStateMap) ForEachKeyWithBroaderOrEqualPortProto(key Key, f func(Key, MapStateEntry) bool) { - msm.trie.Ancestors(key.PrefixLength(), key, func(prefix uint, lpmKey bitlpm.Key[policyTypes.LPMKey], idSet IDSet) bool { - k := Key{ - LPMKey: lpmKey.Value(), - } - for id := range idSet { - k.Identity = id - if !msm.forKey(k, f) { - return false - } - } - return true - }) -} +// SubsetKeys iterates over narrower or equal port/proto entries in the trie in an LPM order +// (least specific match first). 
+func (msm *mapStateMap) SubsetKeys(key Key) iter.Seq2[Key, mapStateEntry] { + return func(yield func(Key, mapStateEntry) bool) { + iter := msm.trie.DescendantShortestPrefixFirstIterator(key.PrefixLength(), key) + for ok, lpmKey, idSet := iter.Next(); ok; ok, lpmKey, idSet = iter.Next() { + k := Key{LPMKey: lpmKey.Value()} -// ForEachKeyWithNarrowerOrEqualPortProto iterates over narrower or equal port/proto entries in the trie. -func (msm *mapStateMap) ForEachKeyWithNarrowerOrEqualPortProto(key Key, f func(Key, MapStateEntry) bool) { - msm.trie.Descendants(key.PrefixLength(), key, func(prefix uint, lpmKey bitlpm.Key[policyTypes.LPMKey], idSet IDSet) bool { - k := Key{ - LPMKey: lpmKey.Value(), - } - for id := range idSet { - k.Identity = id - if !msm.forKey(k, f) { - return false + // For an ANY key, visit all different keys + if key.Identity == 0 { + if !msm.forDifferentKeys(key, k, idSet, yield) { + return + } + } else { // key has a specific ID + // Visit only keys with the ANY or the same ID, if they exist + if !msm.forID(k.WithIdentity(0), idSet, yield) { + return + } + // Else visit the different key with the same identity + if !k.PortProtoIsEqual(key) && !msm.forID(k.WithIdentity(key.Identity), idSet, yield) { + return + } } } - return true - }) + } } func (msm *mapStateMap) Len() int { return len(msm.entries) } -type MapStateOwner interface{} - // MapStateEntry is the configuration associated with a Key in a // MapState. This is a minimized version of policymap.PolicyEntry. type MapStateEntry struct { @@ -425,64 +401,113 @@ type MapStateEntry struct { // Key. Any other value signifies proxy redirection. ProxyPort uint16 - // priority is used to select the Listener if multiple rules would apply different listeners - // to a policy map entry. Lower numbers indicate higher priority. If left out, the proxy - // port number (10000-20000) is used. - priority uint16 - - // Listener name for proxy redirection, if any - Listener string - // IsDeny is true when the policy should be denied. IsDeny bool - // hasAuthType is 'DefaultAuthType' when policy has no explicit AuthType set. In this case the - // value of AuthType is derived from more generic entries covering this entry. - hasAuthType HasAuthType - // AuthType is non-zero when authentication is required for the traffic to be allowed. AuthType AuthType +} - // DerivedFromRules tracks the policy rules this entry derives from +// mapSteteEntry is the entry type with additional internal bookkeping of the relation between +// explicitly and implicitly added entries. +type mapStateEntry struct { + MapStateEntry + + // priority is used to select the proxy port if multiple rules would apply different proxy + // ports to a policy map entry. Lower numbers indicate higher priority. If left out, the + // proxy port number (10000-20000) is used. + priority uint16 + + // hasAuthType is 'DefaultAuthType' when policy has no explicit AuthType set. In this case + // the value of AuthType is derived from more generic entries covering this entry. + hasAuthType HasAuthType + + // derivedFromRules tracks the policy rules this entry derives from. // In sorted order. - DerivedFromRules labels.LabelArrayList + derivedFromRules labels.LabelArrayList - // Owners collects the keys in the map and selectors in the policy that require this key to be present. + // owners collects the keys in the map and selectors in the policy that require this key to be present. // TODO: keep track which selector needed the entry to be deny, redirect, or just allow. 
- owners map[MapStateOwner]struct{} + owners set.Set[MapStateOwner] // dependents contains the keys for entries create based on this entry. These entries // will be deleted once all of the owners are deleted. dependents Keys } -// NewMapStateEntry creates a map state entry. If redirect is true, the +// newMapStateEntry creates a map state entry. If redirect is true, the // caller is expected to replace the ProxyPort field before it is added to // the actual BPF map. // 'cs' is used to keep track of which policy selectors need this entry. If it is 'nil' this entry // will become sticky and cannot be completely removed via incremental updates. Even in this case // the entry may be overridden or removed by a deny entry. -func NewMapStateEntry(cs MapStateOwner, derivedFrom labels.LabelArrayList, proxyPort uint16, listener string, priority uint16, deny bool, hasAuth HasAuthType, authType AuthType) MapStateEntry { +func newMapStateEntry(cs MapStateOwner, derivedFrom labels.LabelArrayList, proxyPort uint16, priority uint16, deny bool, hasAuth HasAuthType, authType AuthType) mapStateEntry { if proxyPort == 0 { - listener = "" priority = 0 } else if priority == 0 { priority = proxyPort // default for tie-breaking } - return MapStateEntry{ - ProxyPort: proxyPort, - Listener: listener, + return mapStateEntry{ + MapStateEntry: MapStateEntry{ + ProxyPort: proxyPort, + IsDeny: deny, + AuthType: authType, + }, priority: priority, - DerivedFromRules: derivedFrom, - IsDeny: deny, hasAuthType: hasAuth, - AuthType: authType, - owners: map[MapStateOwner]struct{}{cs: {}}, + derivedFromRules: derivedFrom, + owners: set.NewSet(cs), } } +// dependentOf returns a new mapStateEntry that is a copy of 'e', but has 'ownerKey' as the sole +// owner, and has no dependent keys. +func (e *mapStateEntry) dependentOf(ownerKey Key) mapStateEntry { + return mapStateEntry{ + MapStateEntry: e.MapStateEntry, + priority: e.priority, + hasAuthType: e.hasAuthType, + derivedFromRules: slices.Clone(e.derivedFromRules), + owners: set.NewSet[MapStateOwner](ownerKey), + } +} + +// dependentFrom returns a new mapStateEntry that is a copy of 'e', but has 'ownerKey' as the sole +// owner, and has no dependent keys. +func (e mapStateEntry) authOverrideFrom(ownerKey Key, entry *mapStateEntry) mapStateEntry { + lbls := slices.Clone(e.derivedFromRules) + lbls.MergeSorted(entry.derivedFromRules) + + return mapStateEntry{ + MapStateEntry: e.MapStateEntry.WithAuthType(entry.AuthType), + priority: e.priority, + hasAuthType: DefaultAuthType, + derivedFromRules: lbls, + owners: set.NewSet[MapStateOwner](ownerKey), + } +} + +func (e MapStateEntry) toMapStateEntry(priority uint16, hasAuth HasAuthType, cs MapStateOwner, derivedFrom labels.LabelArrayList) mapStateEntry { + if e.ProxyPort == 0 { + priority = 0 + } else if priority == 0 { + priority = e.ProxyPort // default for tie-breaking + } + return mapStateEntry{ + MapStateEntry: e, + priority: priority, + hasAuthType: hasAuth, + derivedFromRules: derivedFrom, + owners: set.NewSet(cs), + } +} + +func (e *mapStateEntry) GetRuleLabels() labels.LabelArrayList { + return e.derivedFromRules +} + // AddDependent adds 'key' to the set of dependent keys. -func (e *MapStateEntry) AddDependent(key Key) { +func (e *mapStateEntry) AddDependent(key Key) { if e.dependents == nil { e.dependents = make(Keys, 1) } @@ -490,7 +515,7 @@ func (e *MapStateEntry) AddDependent(key Key) { } // RemoveDependent removes 'key' from the set of dependent keys. 
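Splitting the entry type this way keeps the exported MapStateEntry a small comparable value, so datapath equality can use plain ==, while priority, auth bookkeeping, rule labels, owners, and dependents live only in the unexported wrapper. A rough sketch of the embedding pattern, with hypothetical names:

    package main

    import "fmt"

    // Public is the comparable, datapath-facing part (stand-in for MapStateEntry).
    type Public struct {
        ProxyPort uint16
        IsDeny    bool
    }

    // internal wraps Public with bookkeeping that must not influence datapath
    // equality (stand-in for mapStateEntry).
    type internal struct {
        Public
        priority uint16
        owners   map[string]struct{} // simplified owner set
    }

    func main() {
        a := internal{Public: Public{ProxyPort: 8080}, priority: 1}
        b := internal{Public: Public{ProxyPort: 8080}, priority: 2}
        // Compare only the embedded comparable value, ignoring bookkeeping,
        // analogous to oldEntry.MapStateEntry == entry.MapStateEntry.
        fmt.Println(a.Public == b.Public) // true
    }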
-func (e *MapStateEntry) RemoveDependent(key Key) { +func (e *mapStateEntry) RemoveDependent(key Key) { delete(e.dependents, key) // Nil the map when empty. This is mainly to make unit testing easier. if len(e.dependents) == 0 { @@ -500,33 +525,14 @@ func (e *MapStateEntry) RemoveDependent(key Key) { // HasDependent returns true if the 'key' is contained // within the set of dependent keys -func (e *MapStateEntry) HasDependent(key Key) bool { - if e.dependents == nil { - return false - } +func (e *mapStateEntry) HasDependent(key Key) bool { _, ok := e.dependents[key] return ok } -// NewMapState creates a new MapState interface -func NewMapState() MapState { - return newMapState() -} - -func (ms *mapState) WithState(initMap MapStateMap) MapState { - return ms.withState(initMap) -} - -func (ms *mapState) withState(initMap MapStateMap) *mapState { - for k, v := range initMap { - ms.insert(k, v) - } - return ms -} - func newMapStateMap() mapStateMap { return mapStateMap{ - entries: make(MapStateMap), + entries: make(map[Key]mapStateEntry), trie: bitlpm.NewTrie[policyTypes.LPMKey, IDSet](policyTypes.MapStatePrefixLen), } } @@ -540,6 +546,15 @@ func newMapState() *mapState { // Get the MapStateEntry that matches the Key. func (ms *mapState) Get(k Key) (MapStateEntry, bool) { + v, ok := ms.get(k) + if ok { + return v.MapStateEntry, ok + } + return MapStateEntry{}, false +} + +// Get the mapStateEntry that matches the Key. +func (ms *mapState) get(k Key) (mapStateEntry, bool) { if k.DestPort == 0 && k.PortPrefixLen() > 0 { log.WithFields(logrus.Fields{ logfields.Stacktrace: hclog.Stacktrace(), @@ -553,14 +568,13 @@ func (ms *mapState) Get(k Key) (MapStateEntry, bool) { return ms.allows.Lookup(k) } -// insert the Key and matcthing MapStateEntry into the -// MapState -func (ms *mapState) insert(k Key, v MapStateEntry) { +// insert the Key and MapStateEntry into the MapState +func (ms *mapState) insert(k Key, v mapStateEntry) { if k.DestPort == 0 && k.PortPrefixLen() > 0 { log.WithFields(logrus.Fields{ logfields.Stacktrace: hclog.Stacktrace(), logfields.PolicyKey: k, - }).Errorf("mapState.Get: invalid port prefix length for wildcard port") + }).Errorf("mapState.insert: invalid port prefix length for wildcard port") } if v.IsDeny { ms.allows.delete(k) @@ -571,7 +585,27 @@ func (ms *mapState) insert(k Key, v MapStateEntry) { } } -// Delete removes the Key an related MapStateEntry. +// updateExisting re-inserts an existing entry to its map, to be used to persist changes in the +// entry. +// NOTE: Only to be used when Key and v.IsDeny has not been changed! +func (ms *mapState) updateExisting(k Key, v mapStateEntry) { + if v.IsDeny { + ms.denies.entries[k] = v + } else { + ms.allows.entries[k] = v + } +} + +// deleteExisting removes the Key an related MapStateEntry. +func (ms *mapState) deleteExisting(k Key, v mapStateEntry) { + if v.IsDeny { + ms.denies.delete(k) + } else { + ms.allows.delete(k) + } +} + +// delete removes the Key and related MapStateEntry. func (ms *mapState) delete(k Key) { ms.allows.delete(k) ms.denies.delete(k) @@ -583,32 +617,93 @@ func (ms *mapState) ForEach(f func(Key, MapStateEntry) (cont bool)) (complete bo return ms.allows.ForEach(f) && ms.denies.ForEach(f) } +// Empty returns 'true' if there are no entries in the map +func (ms *mapState) Empty() bool { + return ms.allows.Len() == 0 && ms.denies.Len() == 0 +} + +// forEach iterates over every Key MapStateEntry and stops when the function +// argument returns false. It returns false iff the iteration was cut short. 
+// Used for testing. +func (ms *mapState) forEach(f func(Key, mapStateEntry) (cont bool)) (complete bool) { + return ms.allows.forEach(f) && ms.denies.forEach(f) +} + // Len returns the length of the map func (ms *mapState) Len() int { return ms.allows.Len() + ms.denies.Len() } -// Equals determines if this MapState is equal to the -// argument MapState -// Only used for testing, but also from the endpoint package! -func (msA *mapState) Equals(msB MapState) bool { +// equalsWithLabels determines if this mapState is equal to the +// argument MapState. Only compares the exported MapStateEntry and derivedFromLabels. +// Only used for testing. +func (msA *mapState) equalsWithLabels(msB *mapState) bool { if msA.Len() != msB.Len() { return false } - return msA.ForEach(func(kA Key, vA MapStateEntry) bool { - vB, ok := msB.Get(kA) + return msA.forEach(func(kA Key, vA mapStateEntry) bool { + vB, ok := msB.get(kA) return ok && (&vB).DatapathAndDerivedFromEqual(&vA) }) } +// Equals determines if this MapState is equal to the +// argument (exported) MapStateMap +// Only used for testing from other packages. +func (msA *mapState) Equals(msB MapStateMap) bool { + if msA.Len() != len(msB) { + return false + } + return msA.forEach(func(kA Key, vA mapStateEntry) bool { + vB, ok := msB[kA] + return ok && vB == vA.MapStateEntry + }) +} + +// deepEquals determines if this MapState is equal to the argument MapState. +// Only used for testing. +func (msA *mapState) deepEquals(msB *mapState) bool { + if msA.Len() != msB.Len() { + return false + } + return msA.forEach(func(kA Key, vA mapStateEntry) bool { + vB, ok := msB.get(kA) + return ok && (&vB).deepEqual(&vA) + }) +} + // Diff returns the string of differences between 'obtained' and 'expected' prefixed with // '+ ' or '- ' for obtaining something unexpected, or not obtaining the expected, respectively. +// For use in debugging from other packages. +func (obtained *mapState) Diff(expected MapStateMap) (res string) { + res += "Missing (-), Unexpected (+):\n" + for kE, vE := range expected { + if vO, ok := obtained.get(kE); ok { + if vO.MapStateEntry != vE { + res += "- " + kE.String() + ": " + vE.String() + "\n" + res += "+ " + kE.String() + ": " + vO.MapStateEntry.String() + "\n" + } + } else { + res += "- " + kE.String() + ": " + vE.String() + "\n" + } + } + obtained.ForEach(func(kO Key, vO MapStateEntry) bool { + if _, ok := expected[kO]; !ok { + res += "+ " + kO.String() + ": " + vO.String() + "\n" + } + return true + }) + return res +} + +// diff returns the string of differences between 'obtained' and 'expected' prefixed with +// '+ ' or '- ' for obtaining something unexpected, or not obtaining the expected, respectively. // For use in debugging. 
-func (obtained *mapState) Diff(expected MapState) (res string) { +func (obtained *mapState) diff(expected *mapState) (res string) { res += "Missing (-), Unexpected (+):\n" - expected.ForEach(func(kE Key, vE MapStateEntry) bool { - if vO, ok := obtained.Get(kE); ok { - if !(&vO).DatapathAndDerivedFromEqual(&vE) { + expected.forEach(func(kE Key, vE mapStateEntry) bool { + if vO, ok := obtained.get(kE); ok { + if !(&vO).deepEqual(&vE) { res += "- " + kE.String() + ": " + vE.String() + "\n" res += "+ " + kE.String() + ": " + vO.String() + "\n" } @@ -617,9 +712,9 @@ func (obtained *mapState) Diff(expected MapState) (res string) { } return true }) - obtained.ForEach(func(kE Key, vE MapStateEntry) bool { - if _, ok := expected.Get(kE); !ok { - res += "+ " + kE.String() + ": " + vE.String() + "\n" + obtained.forEach(func(kO Key, vO mapStateEntry) bool { + if _, ok := expected.get(kO); !ok { + res += "+ " + kO.String() + ": " + vO.String() + "\n" } return true }) @@ -635,14 +730,13 @@ func (ms *mapState) AddDependent(owner Key, dependent Key, changes ChangeState) } } -// addDependentOnEntry adds 'dependent' to the set of dependent keys of 'e'. -func (ms *mapState) addDependentOnEntry(owner Key, e MapStateEntry, dependent Key, changes ChangeState) { +// addDependentOnEntry adds 'dependent' to the set of dependent keys of 'e', where 'e' already +// exists in 'ms'. +func (ms *mapState) addDependentOnEntry(owner Key, e mapStateEntry, dependent Key, changes ChangeState) { if _, exists := e.dependents[dependent]; !exists { - if changes.Old != nil { - changes.Old[owner] = e - } + changes.insertOldIfNotExists(owner, e) e.AddDependent(dependent) - ms.insert(owner, e) + ms.updateExisting(owner, e) } } @@ -665,17 +759,17 @@ func (ms *mapState) RemoveDependent(owner Key, dependent Key, changes ChangeStat } } -// Merge adds owners, dependents, and DerivedFromRules from a new 'entry' to an existing +// merge adds owners, dependents, and DerivedFromRules from a new 'entry' to an existing // entry 'e'. 'entry' is not modified. // Merge is only called if both entries are allow or deny entries, so deny precedence is not // considered here. // ProxyPort, and AuthType are merged by giving precedence to proxy redirection over no proxy // redirection, and explicit auth type over default auth type. -func (e *MapStateEntry) Merge(entry *MapStateEntry) { +func (e *mapStateEntry) merge(entry *mapStateEntry) { // Bail out loudly if both entries are not denies or allows if e.IsDeny != entry.IsDeny { log.WithField(logfields.Stacktrace, hclog.Stacktrace()). - Errorf("MapStateEntry.Merge: both entries must be allows or denies") + Errorf("MapStateEntry.merge: both entries must be allows or denies") return } // Only allow entries have proxy redirection or auth requirement @@ -687,7 +781,6 @@ func (e *MapStateEntry) Merge(entry *MapStateEntry) { // Proxy port value is the tie-breaker when priorities have the same value. 
if !e.IsRedirectEntry() || entry.priority < e.priority || entry.priority == e.priority && entry.ProxyPort < e.ProxyPort { e.ProxyPort = entry.ProxyPort - e.Listener = entry.Listener e.priority = entry.priority } } @@ -708,12 +801,7 @@ func (e *MapStateEntry) Merge(entry *MapStateEntry) { } } - if e.owners == nil && len(entry.owners) > 0 { - e.owners = make(map[MapStateOwner]struct{}, len(entry.owners)) - } - for k, v := range entry.owners { - e.owners[k] = v - } + e.owners.Merge(entry.owners) // merge dependents for k := range entry.dependents { @@ -721,8 +809,8 @@ func (e *MapStateEntry) Merge(entry *MapStateEntry) { } // merge DerivedFromRules - if len(entry.DerivedFromRules) > 0 { - e.DerivedFromRules.MergeSorted(entry.DerivedFromRules) + if len(entry.derivedFromRules) > 0 { + e.derivedFromRules.MergeSorted(entry.derivedFromRules) } } @@ -731,55 +819,37 @@ func (e *MapStateEntry) IsRedirectEntry() bool { return e.ProxyPort != 0 } -// DatapathEqual returns true of two entries are equal in the datapath's PoV, -// i.e., IsDeny, ProxyPort and AuthType are the same for both entries. -func (e *MapStateEntry) DatapathEqual(o *MapStateEntry) bool { - if e == nil || o == nil { - return e == o - } - - return e.IsDeny == o.IsDeny && e.ProxyPort == o.ProxyPort && e.AuthType == o.AuthType -} - // DatapathAndDerivedFromEqual returns true of two entries are equal in the datapath's PoV, // i.e., IsDeny, ProxyPort and AuthType are the same for both entries, and the DerivedFromRules // fields are also equal. // This is used for testing only via mapState.Equal and mapState.Diff. -func (e *MapStateEntry) DatapathAndDerivedFromEqual(o *MapStateEntry) bool { +func (e *mapStateEntry) DatapathAndDerivedFromEqual(o *mapStateEntry) bool { if e == nil || o == nil { return e == o } - return e.IsDeny == o.IsDeny && e.ProxyPort == o.ProxyPort && e.AuthType == o.AuthType && - e.DerivedFromRules.DeepEqual(&o.DerivedFromRules) + return e.MapStateEntry == o.MapStateEntry && e.derivedFromRules.DeepEqual(&o.derivedFromRules) } // DeepEqual is a manually generated deepequal function, deeply comparing the // receiver with other. in must be non-nil. // Defined manually due to deepequal-gen not supporting interface types. -// 'cachedNets' member is ignored in comparison, as it is a cached value and -// makes no functional difference. 
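In the merge above, any proxy redirect wins over no redirect, a lower priority wins between two redirects, and the lower proxy port breaks priority ties. A small sketch of just that precedence rule, with hypothetical names:

    package main

    import "fmt"

    type redirect struct {
        proxyPort uint16 // 0 means no redirect
        priority  uint16 // lower wins; defaults to the proxy port when unset
    }

    // merge keeps the winning redirect: redirect beats no redirect, lower
    // priority beats higher, and the lower proxy port breaks ties.
    func merge(e, other redirect) redirect {
        if other.proxyPort != 0 {
            if e.proxyPort == 0 || other.priority < e.priority ||
                other.priority == e.priority && other.proxyPort < e.proxyPort {
                e = other
            }
        }
        return e
    }

    func main() {
        a := redirect{proxyPort: 15000, priority: 15000}
        b := redirect{proxyPort: 14000, priority: 14000}
        fmt.Println(merge(a, b)) // {14000 14000}: the lower priority wins
    }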
-func (e *MapStateEntry) DeepEqual(o *MapStateEntry) bool { - if !e.DatapathEqual(o) { +func (e *mapStateEntry) deepEqual(o *mapStateEntry) bool { + if e.MapStateEntry != o.MapStateEntry { return false } - if e.Listener != o.Listener || e.priority != o.priority { + if e.priority != o.priority { return false } - if !e.DerivedFromRules.DeepEqual(&o.DerivedFromRules) { + if !e.derivedFromRules.DeepEqual(&o.derivedFromRules) { return false } - if len(e.owners) != len(o.owners) { + if !e.owners.Equal(o.owners) { return false } - for k := range o.owners { - if _, exists := e.owners[k]; !exists { - return false - } - } if len(e.dependents) != len(o.dependents) { return false @@ -790,65 +860,64 @@ func (e *MapStateEntry) DeepEqual(o *MapStateEntry) bool { } } - // ignoring cachedNets - return true } +func (e MapStateEntry) WithAuthType(authType AuthType) MapStateEntry { + e.AuthType = authType + return e +} + // String returns a string representation of the MapStateEntry func (e MapStateEntry) String() string { return "ProxyPort=" + strconv.FormatUint(uint64(e.ProxyPort), 10) + - ",Listener=" + e.Listener + ",IsDeny=" + strconv.FormatBool(e.IsDeny) + - ",AuthType=" + e.AuthType.String() + - ",DerivedFromRules=" + fmt.Sprintf("%v", e.DerivedFromRules) + ",AuthType=" + e.AuthType.String() } -// denyPreferredInsert inserts a key and entry into the map by given preference -// to deny entries, and L3-only deny entries over L3-L4 allows. -// This form may be used when a full policy is computed and we are not yet interested -// in accumulating incremental changes. -// Caller may insert the same MapStateEntry multiple times for different Keys, but all from the same -// owner. -func (ms *mapState) denyPreferredInsert(newKey Key, newEntry MapStateEntry, features policyFeatures) { - // Enforce nil values from NewMapStateEntry - newEntry.dependents = nil - - ms.denyPreferredInsertWithChanges(newKey, newEntry, features, ChangeState{}) +// String returns a string representation of the MapStateEntry +func (e mapStateEntry) String() string { + return e.MapStateEntry.String() + + ",derivedFromRules=" + fmt.Sprintf("%v", e.derivedFromRules) + + ",priority=" + strconv.FormatUint(uint64(e.priority), 10) + + ",owners=" + e.owners.String() + + ",dependents=" + fmt.Sprintf("%v", e.dependents) } // addKeyWithChanges adds a 'key' with value 'entry' to 'keys' keeping track of incremental changes in 'adds' and 'deletes', and any changed or removed old values in 'old', if not nil. -func (ms *mapState) addKeyWithChanges(key Key, entry MapStateEntry, changes ChangeState) { +func (ms *mapState) addKeyWithChanges(key Key, entry mapStateEntry, changes ChangeState) bool { // Keep all owners that need this entry so that it is deleted only if all the owners delete their contribution var datapathEqual bool - oldEntry, exists := ms.Get(key) + oldEntry, exists := ms.get(key) // Only merge if both old and new are allows or denies if exists && (oldEntry.IsDeny == entry.IsDeny) { // Do nothing if entries are equal - if entry.DeepEqual(&oldEntry) { - return // nothing to do + if entry.deepEqual(&oldEntry) { + return false // nothing to do } // Save old value before any changes, if desired - if changes.Old != nil { - changes.insertOldIfNotExists(key, oldEntry) - } + changes.insertOldIfNotExists(key, oldEntry) // Compare for datapath equalness before merging, as the old entry is updated in // place! 
- datapathEqual = oldEntry.DatapathEqual(&entry) - - oldEntry.Merge(&entry) - ms.insert(key, oldEntry) + datapathEqual = oldEntry.MapStateEntry == entry.MapStateEntry + oldEntry.merge(&entry) + ms.updateExisting(key, oldEntry) } else if !exists || entry.IsDeny { // Insert a new entry if one did not exist or a deny entry is overwriting an allow // entry. - // Newly inserted entries must have their own containers, so that they - // remain separate when new owners/dependents are added to existing entries - entry.DerivedFromRules = slices.Clone(entry.DerivedFromRules) - entry.owners = maps.Clone(entry.owners) - entry.dependents = maps.Clone(entry.dependents) + + // Save old value before any changes, if any + if exists { + changes.insertOldIfNotExists(key, oldEntry) + } + + // Callers already have cloned the containers, no need to do it again here ms.insert(key, entry) + } else { + // Do not record and incremental add if nothing was done + return false } // Record an incremental Add if desired and entry is new or changed @@ -859,30 +928,38 @@ func (ms *mapState) addKeyWithChanges(key Key, entry MapStateEntry, changes Chan delete(changes.Deletes, key) } } + + return true } -// deleteKeyWithChanges deletes a 'key' from 'keys' keeping track of incremental changes in 'adds' and 'deletes'. -// The key is unconditionally deleted if 'cs' is nil, otherwise only the contribution of this 'cs' is removed. +// deleteKeyWithChanges deletes a 'key' from 'keys' keeping track of incremental changes in 'adds' +// and 'deletes'. +// The key is unconditionally deleted if 'owner' is nil, otherwise only the contribution of this +// 'owner' is removed. func (ms *mapState) deleteKeyWithChanges(key Key, owner MapStateOwner, changes ChangeState) { - if entry, exists := ms.Get(key); exists { + if entry, exists := ms.get(key); exists { // Save old value before any changes, if desired oldAdded := changes.insertOldIfNotExists(key, entry) if owner != nil { - // remove the contribution of the given selector only - if _, exists = entry.owners[owner]; exists { - // Remove the contribution of this selector from the entry - delete(entry.owners, owner) + if entry.owners.Has(owner) { + // remove this owner from entry's owners + changed := entry.owners.Remove(owner) + // Remove the dependency from the owner Key if ownerKey, ok := owner.(Key); ok { ms.RemoveDependent(ownerKey, key, changes) } // key is not deleted if other owners still need it - if len(entry.owners) > 0 { + if entry.owners.Len() > 0 { + if changed { + // re-insert entry due to owner change + ms.updateExisting(key, entry) + } return } } else { // 'owner' was not found, do not change anything if oldAdded { - delete(changes.Old, key) + delete(changes.old, key) } return } @@ -892,12 +969,8 @@ func (ms *mapState) deleteKeyWithChanges(key Key, owner MapStateOwner, changes C // Owner is nil when deleting more specific entries (e.g., L3/L4) when // adding deny entries that cover them (e.g., L3-deny). 
if owner == nil { - for owner := range entry.owners { - if owner != nil { - if ownerKey, ok := owner.(Key); ok { - ms.RemoveDependent(ownerKey, key, changes) - } - } + for ownerKey := range set.MembersOfType[Key](entry.owners) { + ms.RemoveDependent(ownerKey, key, changes) } } @@ -913,8 +986,8 @@ func (ms *mapState) deleteKeyWithChanges(key Key, owner MapStateOwner, changes C } } - ms.allows.delete(key) - ms.denies.delete(key) + // delete entry from the map it exists in + ms.deleteExisting(key, entry) } } @@ -926,321 +999,161 @@ func (ms *mapState) revertChanges(changes ChangeState) { ms.denies.delete(k) } // 'old' contains all the original values of both modified and deleted entries - for k, v := range changes.Old { + for k, v := range changes.old { ms.insert(k, v) } } -// denyPreferredInsertWithChanges contains the most important business logic for policy insertions. It inserts -// a key and entry into the map by giving preference to deny entries, and L3-only deny entries over L3-L4 allows. -// Incremental changes performed are recorded in 'adds' and 'deletes', if not nil. -// See https://docs.google.com/spreadsheets/d/1WANIoZGB48nryylQjjOw6lKjI80eVgPShrdMTMalLEw#gid=2109052536 for details -func (ms *mapState) denyPreferredInsertWithChanges(newKey Key, newEntry MapStateEntry, features policyFeatures, changes ChangeState) { - // Sanity check on the newKey - if newKey.TrafficDirection() >= trafficdirection.Invalid { - log.WithFields(logrus.Fields{ - logfields.Stacktrace: hclog.Stacktrace(), - logfields.TrafficDirection: newKey.TrafficDirection, - }).Errorf("mapState.denyPreferredInsertWithChanges: invalid traffic direction in key") - return +// insertDependentOfKey adds a dependent entry to 'k' with the more specific port/proto of 'newKey' +// to ensure 'v' takes precedence. +// Called only for 'k' with specific identity and with broader protocol/port than l4-only 'newKey'. +func (ms *mapState) insertDependentOfKey(k Key, v mapStateEntry, newKey Key, changes ChangeState) { + // Compute narrower 'key' with identity of 'k' + key := newKey.WithIdentity(k.Identity) + if ms.addKeyWithChanges(key, v.dependentOf(k), changes) { + ms.addDependentOnEntry(k, v, key, changes) } - // Skip deny rules processing if the policy in this direction has no deny rules - if !features.contains(denyRules) { - ms.authPreferredInsert(newKey, newEntry, features, changes) - return +} + +// insertDependentOfNewKey adds a dependent entry to 'newKey' with the more specific port/proto of +// 'k' to ensure 'newEntry' takes precedence. +// Called only for L4-only 'k' with narrower protocol/port than 'newKey' with a specific identity. +func (ms *mapState) insertDependentOfNewKey(newKey Key, newEntry *mapStateEntry, k Key, changes ChangeState) { + // Compute narrower 'key' with identity of 'newKey' + key := k.WithIdentity(newKey.Identity) + if ms.addKeyWithChanges(key, newEntry.dependentOf(newKey), changes) { + newEntry.AddDependent(key) } +} - // If we have a deny "all" we don't accept any kind of map entry. - if _, ok := ms.denies.Lookup(allKey[newKey.TrafficDirection()]); ok { - return +// insertAuthOverrideFromKey adds a dependent entry to 'k' with the more specific port/proto of +// 'newKey' and with override auth type from 'v' to ensure auth type of 'v' is used for identity of +// 'k' also when the traffic matches the L4-only 'newKey'. +// Called only for 'k' with specific identity and with broader protocol/port than L4-only 'newKey'. 
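The insertDependentOf* and insertAuthOverride* helpers build the dependent L3/L4 key by pairing one key's identity with the other key's narrower port/proto via Key.WithIdentity. A toy sketch of that combination, using simplified stand-in types and a stand-in withIdentity:

    package main

    import "fmt"

    type key struct {
        id    uint32
        proto uint8
        port  uint16
    }

    // withIdentity returns a copy of k carrying a different identity,
    // in the spirit of Key.WithIdentity used in this diff.
    func (k key) withIdentity(id uint32) key {
        k.id = id
        return k
    }

    func main() {
        // A broad deny for identity 1234 on any port, and an L4-only allow on tcp/80.
        denyKey := key{id: 1234}
        l4OnlyAllow := key{id: 0, proto: 6, port: 80}

        // Dependent deny: identity of the deny key, port/proto of the narrower
        // allow key, so the datapath's preference for the most specific L4 match
        // still denies identity 1234 on tcp/80.
        dependent := l4OnlyAllow.withIdentity(denyKey.id)
        fmt.Println(dependent) // {1234 6 80}
    }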
+func (ms *mapState) insertAuthOverrideFromKey(k Key, v mapStateEntry, newKey Key, newEntry mapStateEntry, changes ChangeState) { + // Compute narrower 'key' with identity of 'k' + key := newKey.WithIdentity(k.Identity) + if ms.addKeyWithChanges(key, newEntry.authOverrideFrom(k, &v), changes) { + ms.addDependentOnEntry(k, v, key, changes) } +} - // Since bpf datapath denies by default, we only need to add deny entries to carve out more - // specific holes to less specific allow rules. But since we don't if allow entries will be - // added later (e.g., incrementally due to FQDN rules), we must generally add deny entries - // even if there are no allow entries yet. - - // Datapath matches security IDs exactly, or completely wildcards them (ID == 0). Datapath - // has no LPM/CIDR logic for security IDs. We use LPM/CIDR logic here to find out if allow - // entries are "covered" by deny entries and change them to deny entries if so. We can not - // rely on the default deny as a broad allow could be added later. - - // We cannot update the map while we are - // iterating through it, so we record the - // changes to be made and then apply them. - // Additionally, we need to perform deletes - // first so that deny entries do not get - // merged with allows that are set to be - // deleted. - var ( - updates []MapChange - deletes []Key - ) - if newEntry.IsDeny { - // Test for bailed case first so that we avoid unnecessary computation if entry is - // not going to be added. - bailed := false - // If there is an ANY or equal deny key, then do not add a more specific one. - // A narrower of two deny keys is redundant in the datapath only if the broader ID - // is 0, or the IDs are the same. This is because the ID will be assigned from the - // ipcache and datapath has no notion of one ID being related to another. - ms.denies.ForEachBroaderOrEqualDatapathKey(newKey, func(k Key, v MapStateEntry) bool { - // Identical key needs to be added if the entries are different (to merge - // them). - if k != newKey || v.DeepEqual(&newEntry) { - // If the ID of this iterated-deny-entry is ANY or equal of - // the new-entry and the iterated-deny-entry has a broader (or - // equal) port-protocol then we need not insert the new entry. - bailed = true - return false +// insertAuthOverrideKey adds a dependent entry to 'newKey' with the more specific port/proto of 'k' +// and with override auth type from 'newEntry' to ensure auth type of 'newEntry' is used for +// identity of 'newKey' also when the traffic matches the L4-only 'k'. +// Called only for L4-only 'k' with narrower protocol/port than 'newKey' with a specific identity. +func (ms *mapState) insertAuthOverrideFromNewKey(newKey Key, newEntry *mapStateEntry, k Key, v mapStateEntry, changes ChangeState) { + // Compute narrower 'key' with identity of 'newKey' + key := k.WithIdentity(newKey.Identity) + if ms.addKeyWithChanges(key, v.authOverrideFrom(newKey, newEntry), changes) { + newEntry.AddDependent(key) + } +} + +func (ms *mapState) insertWithChanges(key Key, entry mapStateEntry, features policyFeatures, changes ChangeState) { + ms.denyPreferredInsertWithChanges(key, entry, features, changes) +} + +// denyPreferredInsertWithChanges contains the most important business logic for policy +// insertions. It inserts a key and entry into the map by giving preference to deny entries, and +// L3-only deny entries over L3-L4 allows. 
+// +// Since bpf datapath denies by default, we only need to add deny entries to carve out more specific +// holes to less specific allow rules. But since we don't if allow entries will be added later +// (e.g., incrementally due to FQDN rules), we must generally add deny entries even if there are no +// allow entries yet. +// +// Note on bailed or deleted entries: In general, if we bail out due to being covered by an existing +// entry, or delete an entry due to being covered by the new one, we would want this action reversed +// if the existing entry or this new one is incremantally removed, respectively. +// Generally, whenever a deny entry covers an allow entry (i.e., covering key has broader or equal +// protocol/port, and the keys have the same identity, or the covering key has wildcard identity (ID +// == 0)). +// Secondly, only keys with a specific identity (ID != 0) can be incrementally added or deleted. +// Finally, due to the selector cache being transactional, when an identity is removed, all keys +// with that identity are incrementally deleted. +// Hence, if a covering key is incrementally deleted, it is a key with a specific identity, and all +// keys covered by it will be deleted as well, so there is no situation where this bailed-out or +// deleted key should be reinstated due to the covering key being incrementally deleted. +// +// Note on added dependent L3/4 entries: Since the datapath always gives precedence to the matching +// entry with the most specific L4 (narrower protocol/port), we need to add L3/4 entries e.g., when +// precedence would be given to a narrower allow entry with the wildcard identity (L4-only key), +// while precedence should be given to the deny entry with a specific identity and broader L4 when +// the given packet matches both of them. To force the datapath to give precedence to the deny entry +// we add a new dependent deny entry with the identity of the (broader) deny entry and the L4 +// protocol and port of the (narrower) L4-only key. The added key is marked as a dependent entry of +// the key with a specific identity (rather than the l4-only key), so that the dependent added entry +// is also deleted when the identity of its owner key is (incrementally) removed. +// +// Incremental changes performed are recorded in 'changes'. +func (ms *mapState) denyPreferredInsertWithChanges(newKey Key, newEntry mapStateEntry, features policyFeatures, changes ChangeState) { + // Bail if covered by a deny key + if !ms.denies.Empty() { + for k := range ms.denies.BroaderOrEqualKeys(newKey) { + // Identical deny key needs to be added to merge their entries. + if k != newKey || !newEntry.IsDeny { + return } - return true - }) - if bailed { - return } + } - // Deny takes precedence for the port/proto of the newKey - // for each allow with broader port/proto and narrower ID. - ms.allows.ForEachBroaderKeyWithNarrowerID(newKey, func(k Key, v MapStateEntry) bool { - // If newKey is a superset of the iterated allow key and newKey has - // a less specific port-protocol than the iterated allow key then an - // additional deny entry with port/proto of newKey and with the - // identity of the iterated allow key must be added. 
- denyKeyCpy := newKey - denyKeyCpy.Identity = k.Identity - l3l4DenyEntry := NewMapStateEntry(newKey, newEntry.DerivedFromRules, 0, "", 0, true, DefaultAuthType, AuthTypeDisabled) - updates = append(updates, MapChange{ - Add: true, - Key: denyKeyCpy, - Value: l3l4DenyEntry, - }) - return true - }) - - // Only a non-wildcard key can have a wildcard superset key - if newKey.Identity != 0 { - ms.allows.ForEachNarrowerKeyWithBroaderID(newKey, func(k Key, v MapStateEntry) bool { - // If this iterated-allow-entry is a wildcard superset of the new-entry - // and it has a more specific port-protocol than the new-entry - // then an additional copy of the new deny entry with the more - // specific port-protocol of the iterated-allow-entry must be inserted. - if k.Identity != 0 { - return true // skip non-wildcard - } - newKeyCpy := k - newKeyCpy.Identity = newKey.Identity - l3l4DenyEntry := NewMapStateEntry(newKey, newEntry.DerivedFromRules, 0, "", 0, true, DefaultAuthType, AuthTypeDisabled) - updates = append(updates, MapChange{ - Add: true, - Key: newKeyCpy, - Value: l3l4DenyEntry, - }) - return true - }) + if newEntry.IsDeny { + // Delete covered allow entries. + for k := range ms.allows.NarrowerOrEqualKeys(newKey) { + ms.deleteKeyWithChanges(k, nil, changes) + } + // Delete covered deny entries, except for identical keys that need to be merged. + for k := range ms.denies.NarrowerKeys(newKey) { + ms.deleteKeyWithChanges(k, nil, changes) } - ms.allows.ForEachNarrowerOrEqualKey(newKey, func(k Key, v MapStateEntry) bool { - // If newKey has a broader (or equal) port-protocol and the newKey's - // identity is a superset (or same) of the iterated identity, then we should - // either delete the iterated-allow-entry (if the identity is the same or - // the newKey is L3 wildcard), or change it to a deny entry otherwise - if newKey.Identity == 0 || newKey.Identity == k.Identity { - deletes = append(deletes, k) - } else { - // When newKey.Identity is not ANY and is different from the subset - // key, we must keep the subset key and make it a deny instead. - // Note that these security identities have no numerical relation to - // each other (e.g, they could be any numbers X and Y) and the - // datapath does an exact match on them. - l3l4DenyEntry := NewMapStateEntry(newKey, newEntry.DerivedFromRules, 0, "", 0, true, DefaultAuthType, AuthTypeDisabled) - updates = append(updates, MapChange{ - Add: true, - Key: k, - Value: l3l4DenyEntry, - }) - } - return true - }) - // Not adding the new L3/L4 deny entries yet so that we do not need to worry about - // them below. - - ms.denies.ForEachNarrowerOrEqualDatapathKey(newKey, func(k Key, v MapStateEntry) bool { - // Identical key needs to remain if owners are different to merge them - if k != newKey || v.DeepEqual(&newEntry) { - // If this iterated-deny-entry is a subset (or equal) of the - // new-entry and the new-entry has a broader (or equal) - // port-protocol the newKey will match all the packets the iterated - // key would, given that there are no more specific or L4-only allow - // entries, and then we can delete the iterated-deny-entry. - deletes = append(deletes, k) + // Add L3/4 deny entry for each more specific allow key with the wildcard identity + // as the more specific allow would otherwise take precedence in the datapath over + // the less specific 'newKey' with a specific identity. + // + // Skip when 'newKey' has no port wildcarding, as then there can't be any narrower + // keys. 
+ if newKey.Identity != 0 && newKey.HasPortWildcard() { + for k := range ms.allows.NarrowerKeysWithWildcardID(newKey) { + ms.insertDependentOfNewKey(newKey, &newEntry, k, changes) } - return true - }) - - for _, key := range deletes { - ms.deleteKeyWithChanges(key, nil, changes) - } - for _, update := range updates { - ms.addKeyWithChanges(update.Key, update.Value, changes) - // L3-only entries can be deleted incrementally so we need to track their - // effects on other entries so that those effects can be reverted when the - // identity is removed. - newEntry.AddDependent(update.Key) } - ms.addKeyWithChanges(newKey, newEntry, changes) } else { + // newEntry is an allow entry. // NOTE: We do not delete redundant allow entries. - var dependents []MapChange - - // Test for bailed case first so that we avoid unnecessary computation if entry is - // not going to be added, or is going to be changed to a deny entry. - bailed := false - insertAsDeny := false - var denyEntry MapStateEntry - ms.denies.ForEachBroaderOrEqualKey(newKey, func(k Key, v MapStateEntry) bool { - // If the iterated-deny-entry is a wildcard or has the same identity then it - // can be bailed out. - if k.Identity == 0 || k.Identity == newKey.Identity { - bailed = true - return false - } - // if any deny key covers this new allow key, then it needs to be inserted - // as deny, if not bailed out. - if !insertAsDeny { - insertAsDeny = true - denyEntry = NewMapStateEntry(k, v.DerivedFromRules, 0, "", 0, true, DefaultAuthType, AuthTypeDisabled) - } else { - // Collect the owners and labels of all the contributing deny rules - denyEntry.Merge(&v) - } - return true - }) - if bailed { - return - } - if insertAsDeny { - ms.authPreferredInsert(newKey, denyEntry, features, changes) - return - } - // Deny takes precedence for the identity of the newKey and the port/proto of the - // iterated narrower port/proto due to broader ID (CIDR or ANY) - ms.denies.ForEachNarrowerKeyWithBroaderID(newKey, func(k Key, v MapStateEntry) bool { - // If the new-entry is a subset of the iterated-deny-entry - // and the new-entry has a less specific port-protocol than the - // iterated-deny-entry then an additional copy of the iterated-deny-entry - // with the identity of the new-entry must be added. - denyKeyCpy := k - denyKeyCpy.Identity = newKey.Identity - l3l4DenyEntry := NewMapStateEntry(k, v.DerivedFromRules, 0, "", 0, true, DefaultAuthType, AuthTypeDisabled) - updates = append(updates, MapChange{ - Add: true, - Key: denyKeyCpy, - Value: l3l4DenyEntry, - }) - // L3-only entries can be deleted incrementally so we need to track their - // effects on other entries so that those effects can be reverted when the - // identity is removed. - dependents = append(dependents, MapChange{ - Key: k, - Value: v, - }) - return true - }) - - if newKey.Identity == 0 { - ms.denies.ForEachBroaderKeyWithNarrowerID(newKey, func(k Key, v MapStateEntry) bool { - // If the new-entry is a wildcard superset of the iterated-deny-entry - // and the new-entry has a more specific port-protocol than the - // iterated-deny-entry then an additional copy of the iterated-deny-entry - // with the more specific port-porotocol of the new-entry must - // be added. 
- denyKeyCpy := newKey - denyKeyCpy.Identity = k.Identity - l3l4DenyEntry := NewMapStateEntry(k, v.DerivedFromRules, 0, "", 0, true, DefaultAuthType, AuthTypeDisabled) - updates = append(updates, MapChange{ - Add: true, - Key: denyKeyCpy, - Value: l3l4DenyEntry, - }) - // L3-only entries can be deleted incrementally so we need to track their - // effects on other entries so that those effects can be reverted when the - // identity is removed. - dependents = append(dependents, MapChange{ - Key: k, - Value: v, - }) - return true - }) + // Avoid allocs in this block if there are no deny enties + if !ms.denies.Empty() { + // Add L3/4 deny entries for broader deny keys with a specific identity as + // the narrower L4-only allow would otherwise take precedence in the + // datapath. + if newKey.Identity == 0 && newKey.Nexthdr != 0 { // L4-only newKey + for k, v := range ms.denies.BroaderKeysWithSpecificID(newKey) { + ms.insertDependentOfKey(k, v, newKey, changes) + } + } } - for i, update := range updates { - if update.Add { - ms.addKeyWithChanges(update.Key, update.Value, changes) - dep := dependents[i] - ms.addDependentOnEntry(dep.Key, dep.Value, update.Key, changes) - } + // Checking for auth feature here is faster than calling 'authPreferredInsert' and + // checking for it there. + if features.contains(authRules) { + ms.authPreferredInsert(newKey, newEntry, changes) + return } - ms.authPreferredInsert(newKey, newEntry, features, changes) } + + ms.addKeyWithChanges(newKey, newEntry, changes) } -// IsSuperSetOf checks if the receiver Key is a superset of the argument Key, and returns a -// specificity score of the receiver key (higher score is more specific), if so. Being a superset -// means that the receiver key would match all the traffic of the argument key without being the -// same key. Hence, a L3-only key is not a superset of a L4-only key, as the L3-only key would match -// the traffic for the given L3 only, while the L4-only key matches traffic on the given port for -// all the L3's. -// Returns 0 if the receiver key is not a superset of the argument key. -// -// Specificity score for all possible superset wildcard patterns. Datapath requires proto to be specified if port is specified. -// x. L3/proto/port -// 1. */*/* -// 2. */proto/* -// 3. */proto/port -// 4. ID/*/* -// 5. 
ID/proto/* -// ( ID/proto/port can not be superset of anything ) -func IsSuperSetOf(k, other Key) int { - if k.TrafficDirection() != other.TrafficDirection() { - return 0 // TrafficDirection must match for 'k' to be a superset of 'other' - } - if k.Identity == 0 { - if other.Identity == 0 { - if k.Nexthdr == 0 { // k.DestPort == 0 is implied - if other.Nexthdr != 0 { - return 1 // */*/* is a superset of */proto/x - } // else both are */*/* - } else if k.Nexthdr == other.Nexthdr { - if k.PortIsBroader(other) { - return 2 // */proto/* is a superset of */proto/port - } // else more specific or different ports - } // else more specific or different protocol - } else { - // Wildcard L3 is a superset of a specific L3 only if wildcard L3 is also wildcard L4, or the L4's match between the keys - if k.Nexthdr == 0 { // k.DestPort == 0 is implied - return 1 // */*/* is a superset of ID/x/x - } else if k.Nexthdr == other.Nexthdr { - if k.PortIsBroader(other) { - return 2 // */proto/* is a superset of ID/proto/x - } else if k.PortIsEqual(other) { - return 3 // */proto/port is a superset of ID/proto/port - } // else more specific or different ports - } // else more specific or different protocol - } - } else if k.Identity == other.Identity { - if k.Nexthdr == 0 { - if other.Nexthdr != 0 { - return 4 // ID/*/* is a superset of ID/proto/x - } // else both are ID/*/* - } else if k.Nexthdr == other.Nexthdr { - if k.PortIsBroader(other) { - return 5 // ID/proto/* is a superset of ID/proto/port - } // else more specific or different ports - } // else more specific or different protocol - } // else more specific or different identity - return 0 +// overrideAuthType sets the AuthType of 'v' to that of 'newKey', saving the old entry in 'changes'. +func (ms *mapState) overrideAuthType(newEntry mapStateEntry, k Key, v mapStateEntry, changes ChangeState) { + // Save the old value first + changes.insertOldIfNotExists(k, v) + + // Auth type can be changed in-place, trie is not affected + v.AuthType = newEntry.AuthType + ms.allows.entries[k] = v } // authPreferredInsert applies AuthType of a more generic entry to more specific entries, if not @@ -1249,344 +1162,128 @@ func IsSuperSetOf(k, other Key) int { // This function is expected to be called for a map insertion after deny // entry evaluation. If there is a map entry that is a superset of 'newKey' // which denies traffic matching 'newKey', then this function should not be called. -func (ms *mapState) authPreferredInsert(newKey Key, newEntry MapStateEntry, features policyFeatures, changes ChangeState) { - if features.contains(authRules) { - if newEntry.hasAuthType == DefaultAuthType { - // New entry has a default auth type. - // Fill in the AuthType from more generic entries with an explicit auth type - maxSpecificity := 0 - var l3l4State MapStateMap - - ms.allows.ForEachKeyWithBroaderOrEqualPortProto(newKey, func(k Key, v MapStateEntry) bool { - // Nothing to be done if entry has default AuthType - if v.hasAuthType == DefaultAuthType { - return true - } +func (ms *mapState) authPreferredInsert(newKey Key, newEntry mapStateEntry, changes ChangeState) { + if newEntry.hasAuthType == DefaultAuthType { + // New entry has a default auth type. 
+ + // Fill in the AuthType from the most specific covering key with an explicit + // auth type + for _, v := range ms.allows.CoveringKeys(newKey) { + if v.hasAuthType == ExplicitAuthType { + // AuthType from the most specific covering key is applied to + // 'newEntry' + newEntry.AuthType = v.AuthType + break + } + } - // Find out if 'k' is an identity-port-proto superset of 'newKey' - if specificity := IsSuperSetOf(k, newKey); specificity > 0 { - if specificity > maxSpecificity { - // AuthType from the most specific superset is - // applied to 'newEntry' - newEntry.AuthType = v.AuthType - maxSpecificity = specificity - } - } else { - // Check if a new L3L4 entry must be created due to L3-only - // 'k' specifying an explicit AuthType and an L4-only 'newKey' not - // having an explicit AuthType. In this case AuthType should - // only override the AuthType for the L3 & L4 combination, - // not L4 in general. - // - // These need to be collected and only added if there is a - // superset key of newKey with an explicit auth type. In - // this case AuthType of the new L4-only entry was - // overridden by a more generic entry and 'max_specificity > - // 0' after the loop. - if newKey.Identity == 0 && newKey.Nexthdr != 0 && newKey.DestPort != 0 && - k.Identity != 0 && (k.Nexthdr == 0 || k.Nexthdr == newKey.Nexthdr && k.DestPort == 0) { - newKeyCpy := newKey - newKeyCpy.Identity = k.Identity - l3l4AuthEntry := NewMapStateEntry(k, v.DerivedFromRules, newEntry.ProxyPort, newEntry.Listener, newEntry.priority, false, DefaultAuthType, v.AuthType) - l3l4AuthEntry.DerivedFromRules.MergeSorted(newEntry.DerivedFromRules) - - if l3l4State == nil { - l3l4State = make(MapStateMap) - } - l3l4State[newKeyCpy] = l3l4AuthEntry - } + // Override the AuthType for specific L3/4 keys, if the newKey is L4-only, + // and there is a key with broader port/proto for a specific identity that + // has an explicit auth type. + if newKey.Identity == 0 && newKey.Nexthdr != 0 { // L4-only newKey + for k, v := range ms.allows.BroaderKeysWithSpecificID(newKey) { + if v.hasAuthType == ExplicitAuthType { + ms.insertAuthOverrideFromKey(k, v, newKey, newEntry, changes) } - return true - }) - // Add collected L3/L4 entries if the auth type of the new entry was not - // overridden by a more generic entry. If it was overridden, the new L3L4 - // entries are not needed as the L4-only entry with an overridden AuthType - // will be matched before the L3-only entries in the datapath. - if maxSpecificity == 0 { - for k, v := range l3l4State { - ms.addKeyWithChanges(k, v, changes) - // L3-only entries can be deleted incrementally so we need to track their - // effects on other entries so that those effects can be reverted when the - // identity is removed. - newEntry.AddDependent(k) + } + } + } else { // New entry has an explicit auth type + // Check if the new key is the most specific covering key of any other key + // with the default auth type, and propagate the auth type from the new + // entry to such entries. + if newKey.Identity == 0 { + // A key with a wildcard ID can be the most specific covering key + // for keys with any ID. Hence we need to iterate narrower keys with + // all IDs and: + // - change all iterated keys with a default auth type + // to the auth type of the newKey. + // - stop iteration for any given ID at first key with that ID that + // has an explicit auth type, as that is the most specific covering + // key for the remaining subset keys with that specific ID. 
+ seenIDs := make(IDSet) + for k, v := range ms.allows.SubsetKeys(newKey) { + // Skip if a subset entry has an explicit auth type + if v.hasAuthType == ExplicitAuthType { + // Keep track of IDs for which an explicit auth type + // has been encountered. + seenIDs[k.Identity] = struct{}{} + continue + } + // Override entries for which an explicit auth type has not been + // seen yet. + if _, exists := seenIDs[k.Identity]; !exists { + ms.overrideAuthType(newEntry, k, v, changes) } } } else { - // New entry has an explicit auth type. - // Check if the new entry is the most specific superset of any other entry - // with the default auth type, and propagate the auth type from the new - // entry to such entries. - explicitSubsetKeys := make(Keys) - defaultSubsetKeys := make(map[Key]int) - - ms.allows.ForEachKeyWithNarrowerOrEqualPortProto(newKey, func(k Key, v MapStateEntry) bool { - // Find out if 'newKey' is a superset of 'k' - if specificity := IsSuperSetOf(newKey, k); specificity > 0 { - if v.hasAuthType == ExplicitAuthType { - // store for later comparison - explicitSubsetKeys[k] = struct{}{} - } else { - defaultSubsetKeys[k] = specificity - } - } else if v.hasAuthType == DefaultAuthType { - // Check if a new L3L4 entry must be created due to L3-only - // 'newKey' with an explicit AuthType and an L4-only 'k' not - // having an explicit AuthType. In this case AuthType should - // only override the AuthType for the L3 & L4 combination, - // not L4 in general. - if newKey.Identity != 0 && (newKey.Nexthdr == 0 || newKey.Nexthdr == k.Nexthdr && newKey.DestPort == 0) && - k.Identity == 0 && k.Nexthdr != 0 && k.DestPort != 0 { - newKeyCpy := k - newKeyCpy.Identity = newKey.Identity - l3l4AuthEntry := NewMapStateEntry(newKey, newEntry.DerivedFromRules, v.ProxyPort, v.Listener, v.priority, false, DefaultAuthType, newEntry.AuthType) - l3l4AuthEntry.DerivedFromRules.MergeSorted(v.DerivedFromRules) - ms.addKeyWithChanges(newKeyCpy, l3l4AuthEntry, changes) - // L3-only entries can be deleted incrementally so we need to track their - // effects on other entries so that those effects can be reverted when the - // identity is removed. - newEntry.AddDependent(newKeyCpy) - } + // A key with a specific ID can be the most specific covering key + // only for keys with the same ID. However, a wildcard ID key can also be + // the most specific covering key for those keys, if it has a more + // specific proto/port than the newKey. Hence we need to iterate + // narrower keys with the same or ANY ID and: + // - change all iterated keys with the same ID and a default auth + // type to the auth type of the newKey + // - stop iteration at first key with an explicit auth, as that is + // the most specific covering key for the remaining subset keys with + // the same ID. + for k, v := range ms.allows.SubsetKeys(newKey) { + // Stop if a subset entry has an explicit auth type, as that is more + // specific for all remaining subset keys + if v.hasAuthType == ExplicitAuthType { + break + } + // auth only propagates from a key with specific ID + // to keys with the same ID. 
+ if k.Identity != 0 { + ms.overrideAuthType(newEntry, k, v, changes) } + } - return true - }) - // Find out if this newKey is the most specific superset for all the subset keys with default auth type - Next: - for k, specificity := range defaultSubsetKeys { - for l := range explicitSubsetKeys { - if s := IsSuperSetOf(l, k); s > specificity { - // k has a more specific superset key than the newKey, skip - continue Next + // Override authtype for specific L3L4 keys if 'newKey' with a + // specific ID has an explicit AuthType and an L4-only 'k' has a + // default AuthType. In this case AuthType of 'newEntry' should only + // override the AuthType for the L3 & L4 combination, not L4 in + // general. + // + // Only (partially) wildcarded port can have narrower keys. + if newKey.HasPortWildcard() { + for k, v := range ms.allows.NarrowerKeysWithWildcardID(newKey) { + if v.hasAuthType == DefaultAuthType { + ms.insertAuthOverrideFromNewKey(newKey, &newEntry, k, v, changes) } } - // newKey is the most specific superset with an explicit auth type, - // propagate auth type from newEntry to the entry of k - v, _ := ms.Get(k) - v.AuthType = newEntry.AuthType - ms.addKeyWithChanges(k, v, changes) // Update the map value } } } - ms.addKeyWithChanges(newKey, newEntry, changes) -} -var visibilityDerivedFromLabels = labels.LabelArray{ - labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelVisibilityAnnotation, labels.LabelSourceReserved), + ms.addKeyWithChanges(newKey, newEntry, changes) } -var visibilityDerivedFrom = labels.LabelArrayList{visibilityDerivedFromLabels} - -// insertIfNotExists only inserts `key=value` if `key` does not exist in keys already -// returns 'true' if 'key=entry' was added to 'keys' -func (changes *ChangeState) insertOldIfNotExists(key Key, entry MapStateEntry) bool { - if changes == nil || changes.Old == nil { +// insertIfNotExists only inserts an entry in 'changes.Old' if 'key' does not exist in there already +// and 'key' does not already exist in 'changes.Adds'. This prevents recording "old" values for +// newly added keys. When an entry is updated, we are called before the key is added to +// 'changes.Adds' so we'll record the old value as expected. +// Returns 'true' if an old entry was added. +func (changes *ChangeState) insertOldIfNotExists(key Key, entry mapStateEntry) bool { + if changes == nil || changes.old == nil { return false } - if _, exists := changes.Old[key]; !exists { + if _, exists := changes.old[key]; !exists { // Only insert the old entry if the entry was not first added on this round of // changes. if _, added := changes.Adds[key]; !added { - // new containers to keep this entry separate from the one that may remain in 'keys' - entry.DerivedFromRules = slices.Clone(entry.DerivedFromRules) - entry.owners = maps.Clone(entry.owners) + // Clone to keep this entry separate from the one that may remain in 'keys' + entry.derivedFromRules = slices.Clone(entry.derivedFromRules) + entry.owners = entry.owners.Clone() entry.dependents = maps.Clone(entry.dependents) - changes.Old[key] = entry + changes.old[key] = entry return true } } return false } -// ForEachKeyWithPortProto calls 'f' for each Key and MapStateEntry, where the Key has the same traffic direction and and L4 fields (protocol, destination port and mask). 
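The new insertOldIfNotExists above is what makes incremental changes revertible: an old value is recorded only for keys that already existed before this round of changes, while keys first added in the same round get no old entry and are simply deleted on revert. A self-contained sketch of that rule, using placeholder key and entry types instead of the package's internal mapStateEntry:

    package main

    import "fmt"

    type key struct{ id, port int }
    type entry string

    // changeState mirrors the idea behind ChangeState: the keys added in this
    // round of changes, and the prior values of keys that were modified.
    type changeState struct {
        adds map[key]struct{}
        old  map[key]entry
    }

    // insertOldIfNotExists records val as the old value for k, unless k was
    // first added during this round or an old value was already recorded.
    func (c *changeState) insertOldIfNotExists(k key, val entry) bool {
        if _, exists := c.old[k]; exists {
            return false
        }
        if _, added := c.adds[k]; added {
            return false // newly added this round: nothing to restore on revert
        }
        c.old[k] = val
        return true
    }

    func main() {
        c := &changeState{adds: map[key]struct{}{}, old: map[key]entry{}}
        c.adds[key{42, 80}] = struct{}{} // key 42/80 was added in this round

        fmt.Println(c.insertOldIfNotExists(key{42, 80}, "allow")) // false: added this round
        fmt.Println(c.insertOldIfNotExists(key{7, 53}, "deny"))   // true: old value saved
        fmt.Println(c.insertOldIfNotExists(key{7, 53}, "allow"))  // false: already saved
    }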
-func (msm *mapStateMap) ForEachKeyWithPortProto(key Key, f func(Key, MapStateEntry) bool) { - // 'Identity' field in 'key' is ignored on by ExactLookup - idSet, ok := msm.trie.ExactLookup(key.PrefixLength(), key) - if ok { - for id := range idSet { - k := key - k.Identity = id - if !msm.forKey(k, f) { - return - } - } - } -} - -// addVisibilityKeys adjusts and expands PolicyMapState keys -// and values to redirect for visibility on the port of the visibility -// annotation while still denying traffic on this port for identities -// for which the traffic is denied. -// -// Datapath lookup order is, from highest to lowest precedence: -// 1. L3/L4 -// 2. L4-only (wildcard L3) -// 3. L3-only (wildcard L4) -// 4. Allow-all -// -// This means that the L4-only allow visibility key can only be added if there is an -// allow-all key, and all L3-only deny keys are expanded to L3/L4 keys. If no -// L4-only key is added then also the L3-only allow keys need to be expanded to -// L3/L4 keys for visibility redirection. In addition the existing L3/L4 and L4-only -// allow keys need to be redirected to the proxy port, if not already redirected. -// -// The above can be accomplished by: -// -// 1. Change existing L4-only ALLOW key on matching port that does not already -// redirect to redirect. -// - e.g., 0:80=allow,0 -> 0:80=allow, -// 2. If allow-all policy exists, add L4-only visibility redirect key if the L4-only -// key does not already exist. -// - e.g., 0:0=allow,0 -> add 0:80=allow, if 0:80 does not exist -// - this allows all traffic on port 80, but see step 5 below. -// 3. Change all L3/L4 ALLOW keys on matching port that do not already redirect to -// redirect. -// - e.g, :80=allow,0 -> :80=allow, -// 4. For each L3-only ALLOW key add the corresponding L3/L4 ALLOW redirect if no -// L3/L4 key already exists and no L4-only key already exists and one is not added. -// - e.g., :0=allow,0 -> add :80=allow, if :80 -// and 0:80 do not exist -// 5. If a new L4-only key was added: For each L3-only DENY key add the -// corresponding L3/L4 DENY key if no L3/L4 key already exists. -// - e.g., :0=deny,0 -> add :80=deny,0 if :80 does not exist -// -// With the above we only change/expand existing allow keys to redirect, and -// expand existing drop keys to also drop on the port of interest, if a new -// L4-only key allowing the port is added. -// -// 'adds' and 'oldValues' are updated with the changes made. 'adds' contains both the added and -// changed keys. 'oldValues' contains the old values for changed keys. This function does not -// delete any keys. -func (ms *mapState) addVisibilityKeys(e PolicyOwner, redirectPort uint16, visMeta *VisibilityMetadata, changes ChangeState) { - direction := trafficdirection.Egress - if visMeta.Ingress { - direction = trafficdirection.Ingress - } - - key := KeyForDirection(direction).WithPortProto(visMeta.Proto, visMeta.Port) - entry := NewMapStateEntry(nil, visibilityDerivedFrom, redirectPort, "", 0, false, DefaultAuthType, AuthTypeDisabled) - - _, haveAllowAllKey := ms.Get(allKey[direction]) - l4Only, haveL4OnlyKey := ms.Get(key) - addL4OnlyKey := false - if haveL4OnlyKey && !l4Only.IsDeny && l4Only.ProxyPort == 0 { - // 1. Change existing L4-only ALLOW key on matching port that does not already - // redirect to redirect. 
- e.PolicyDebug(logrus.Fields{ - logfields.BPFMapKey: key, - logfields.BPFMapValue: entry, - }, "addVisibilityKeys: Changing L4-only ALLOW key for visibility redirect") - ms.addKeyWithChanges(key, entry, changes) - } - if haveAllowAllKey && !haveL4OnlyKey { - // 2. If allow-all policy exists, add L4-only visibility redirect key if the L4-only - // key does not already exist. - e.PolicyDebug(logrus.Fields{ - logfields.BPFMapKey: key, - logfields.BPFMapValue: entry, - }, "addVisibilityKeys: Adding L4-only ALLOW key for visibility redirect") - addL4OnlyKey = true - ms.addKeyWithChanges(key, entry, changes) - } - // We need to make changes to the map - // outside of iteration. - var updates []MapChange - // - // Loop through all L3 keys in the traffic direction of the new key - // - - // Find entries with the same L4 - ms.allows.ForEachKeyWithPortProto(key, func(k Key, v MapStateEntry) bool { - if k.Identity != 0 { - if v.ProxyPort == 0 { - // 3. Change all L3/L4 ALLOW keys on matching port that do not - // already redirect to redirect. - v.ProxyPort = redirectPort - // redirect port is used as the default priority for tie-breaking - // purposes when two different selectors have conflicting - // redirects. Explicit listener references in the policy can specify - // a priority, but only the default is used for visibility policy, - // as visibility will be achieved by any of the redirects. - v.priority = redirectPort - v.Listener = "" - v.DerivedFromRules = visibilityDerivedFrom - e.PolicyDebug(logrus.Fields{ - logfields.BPFMapKey: k, - logfields.BPFMapValue: v, - }, "addVisibilityKeys: Changing L3/L4 ALLOW key for visibility redirect") - updates = append(updates, MapChange{ - Add: true, - Key: k, - Value: v, - }) - } - } - return true - }) - - // Find Wildcarded L4 allows, i.e., L3-only entries - if !haveL4OnlyKey && !addL4OnlyKey { - ms.allows.ForEachKeyWithPortProto(allKey[key.TrafficDirection()], func(k Key, v MapStateEntry) bool { - if k.Identity != 0 { - k2 := key - k2.Identity = k.Identity - // 4. For each L3-only ALLOW key add the corresponding L3/L4 - // ALLOW redirect if no L3/L4 key already exists and no - // L4-only key already exists and one is not added. - if _, ok := ms.Get(k2); !ok { - d2 := labels.LabelArrayList{visibilityDerivedFromLabels} - d2.MergeSorted(v.DerivedFromRules) - v2 := NewMapStateEntry(k, d2, redirectPort, "", 0, false, v.hasAuthType, v.AuthType) - e.PolicyDebug(logrus.Fields{ - logfields.BPFMapKey: k2, - logfields.BPFMapValue: v2, - }, "addVisibilityKeys: Extending L3-only ALLOW key to L3/L4 key for visibility redirect") - updates = append(updates, MapChange{ - Add: true, - Key: k2, - Value: v2, - }) - // Mark the new entry as a dependent of 'v' - ms.addDependentOnEntry(k, v, k2, changes) - } - } - return true - }) - } - - // Find Wildcarded L4 denies, i.e., L3-only entries - if addL4OnlyKey { - ms.denies.ForEachKeyWithPortProto(allKey[key.TrafficDirection()], func(k Key, v MapStateEntry) bool { - if k.Identity != 0 { - k2 := key - k2.Identity = k.Identity - // 5. If a new L4-only key was added: For each L3-only DENY - // key add the corresponding L3/L4 DENY key if no L3/L4 - // key already exists. 
- if _, ok := ms.Get(k2); !ok { - v2 := NewMapStateEntry(k, v.DerivedFromRules, 0, "", 0, true, DefaultAuthType, AuthTypeDisabled) - e.PolicyDebug(logrus.Fields{ - logfields.BPFMapKey: k2, - logfields.BPFMapValue: v2, - }, "addVisibilityKeys: Extending L3-only DENY key to L3/L4 key to deny a port with visibility annotation") - updates = append(updates, MapChange{ - Add: true, - Key: k2, - Value: v2, - }) - // Mark the new entry as a dependent of 'v' - ms.addDependentOnEntry(k, v, k2, changes) - } - } - return true - }) - } - - for _, update := range updates { - ms.addKeyWithChanges(update.Key, update.Value, changes) - } -} - // determineAllowLocalhostIngress determines whether communication should be allowed // from the localhost. It inserts the Key corresponding to the localhost in // the desiredPolicyKeys if the localhost is allowed to communicate with the @@ -1598,8 +1295,8 @@ func (ms *mapState) determineAllowLocalhostIngress() { labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowLocalHostIngress, labels.LabelSourceReserved), }, } - es := NewMapStateEntry(nil, derivedFrom, 0, "", 0, false, ExplicitAuthType, AuthTypeDisabled) // Authentication never required for local host ingress - ms.denyPreferredInsert(localHostKey, es, allFeatures) + entry := newMapStateEntry(nil, derivedFrom, 0, 0, false, ExplicitAuthType, AuthTypeDisabled) // Authentication never required for local host ingress + ms.insertWithChanges(localHostKey, entry, allFeatures, ChangeState{}) } } @@ -1609,98 +1306,11 @@ func (ms *mapState) determineAllowLocalhostIngress() { // Note that this is used when policy is not enforced, so authentication is explicitly not required. func (ms *mapState) allowAllIdentities(ingress, egress bool) { if ingress { - derivedFrom := labels.LabelArrayList{ - labels.LabelArray{ - labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyIngress, labels.LabelSourceReserved), - }, - } - ms.allows.upsert(allKey[trafficdirection.Ingress], NewMapStateEntry(nil, derivedFrom, 0, "", 0, false, ExplicitAuthType, AuthTypeDisabled)) + ms.allows.upsert(allKey[trafficdirection.Ingress], newMapStateEntry(nil, LabelsAllowAnyIngress, 0, 0, false, ExplicitAuthType, AuthTypeDisabled)) } if egress { - derivedFrom := labels.LabelArrayList{ - labels.LabelArray{ - labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved), - }, - } - ms.allows.upsert(allKey[trafficdirection.Egress], NewMapStateEntry(nil, derivedFrom, 0, "", 0, false, ExplicitAuthType, AuthTypeDisabled)) - } -} - -func (ms *mapState) deniesL4(policyOwner PolicyOwner, l4 *L4Filter) bool { - port := uint16(l4.Port) - proto := l4.U8Proto - - // resolve named port - if port == 0 && l4.PortName != "" { - port = policyOwner.GetNamedPort(l4.Ingress, l4.PortName, proto) - if port == 0 { - return true - } - } - - var key Key - if l4.Ingress { - key = allKey[trafficdirection.Ingress] - } else { - key = allKey[trafficdirection.Egress] + ms.allows.upsert(allKey[trafficdirection.Egress], newMapStateEntry(nil, LabelsAllowAnyEgress, 0, 0, false, ExplicitAuthType, AuthTypeDisabled)) } - - // Are we explicitly denying all traffic? - v, ok := ms.Get(key) - if ok && v.IsDeny { - return true - } - - // Are we explicitly denying this L4-only traffic? - key.DestPort = port - key.Nexthdr = proto - v, ok = ms.Get(key) - if ok && v.IsDeny { - return true - } - - // The given L4 is not categorically denied. - // Traffic to/from a specific L3 on any of the selectors can still be denied. 
- return false -} - -func (ms *mapState) GetIdentities(log *logrus.Logger) (ingIdentities, egIdentities []int64) { - return ms.getIdentities(log, false) -} - -func (ms *mapState) GetDenyIdentities(log *logrus.Logger) (ingIdentities, egIdentities []int64) { - return ms.getIdentities(log, true) -} - -// GetIdentities returns the ingress and egress identities stored in the -// MapState. -// Used only for API requests. -func (ms *mapState) getIdentities(log *logrus.Logger, denied bool) (ingIdentities, egIdentities []int64) { - ms.ForEach(func(key Key, entry MapStateEntry) bool { - if denied != entry.IsDeny { - return true - } - if key.DestPort != 0 { - // If the port is non-zero, then the Key no longer only applies - // at L3. AllowedIngressIdentities and AllowedEgressIdentities - // contain sets of which identities (i.e., label-based L3 only) - // are allowed, so anything which contains L4-related policy should - // not be added to these sets. - return true - } - switch key.TrafficDirection() { - case trafficdirection.Ingress: - ingIdentities = append(ingIdentities, int64(key.Identity)) - case trafficdirection.Egress: - egIdentities = append(egIdentities, int64(key.Identity)) - default: - td := key.TrafficDirection() - log.WithField(logfields.TrafficDirection, td). - Errorf("Unexpected traffic direction present in policy map state for endpoint") - } - return true - }) - return ingIdentities, egIdentities } // MapChanges collects updates to the endpoint policy on the @@ -1709,11 +1319,17 @@ func (ms *mapState) getIdentities(log *logrus.Logger, denied bool) (ingIdentitie type MapChanges struct { firstVersion versioned.KeepVersion mutex lock.Mutex - changes []MapChange - synced []MapChange + changes []mapChange + synced []mapChange version *versioned.VersionHandle } +type mapChange struct { + Add bool // false deletes + Key Key + Value mapStateEntry +} + type MapChange struct { Add bool // false deletes Key Key @@ -1725,19 +1341,27 @@ type MapChange struct { // // The caller is responsible for making sure the same identity is not // present in both 'adds' and 'deletes'. -func (mc *MapChanges) AccumulateMapChanges(cs CachedSelector, adds, deletes []identity.NumericIdentity, keys []Key, value MapStateEntry) { +func (mc *MapChanges) AccumulateMapChanges(adds, deletes []identity.NumericIdentity, keys []Key, value mapStateEntry) { mc.mutex.Lock() defer mc.mutex.Unlock() for _, id := range adds { for _, k := range keys { k.Identity = id - mc.changes = append(mc.changes, MapChange{Add: true, Key: k, Value: value}) + mc.changes = append(mc.changes, mapChange{ + Add: true, + Key: k, + Value: value, + }) } } for _, id := range deletes { for _, k := range keys { k.Identity = id - mc.changes = append(mc.changes, MapChange{Add: false, Key: k, Value: value}) + mc.changes = append(mc.changes, mapChange{ + Add: false, + Key: k, + Value: value, + }) } } } @@ -1780,42 +1404,22 @@ func (mc *MapChanges) consumeMapChanges(p *EndpointPolicy, features policyFeatur changes := ChangeState{ Adds: make(Keys, len(mc.synced)), Deletes: make(Keys, len(mc.synced)), - Old: make(map[Key]MapStateEntry, len(mc.synced)), - } - - var redirects map[string]uint16 - if p.PolicyOwner != nil { - redirects = p.PolicyOwner.GetRealizedRedirects() + old: make(map[Key]mapStateEntry, len(mc.synced)), } for i := range mc.synced { - if mc.synced[i].Add { - // Redirect entries for unrealized redirects come in with an invalid - // redirect port (65535), replace it with the actual proxy port number. 
- key := mc.synced[i].Key - entry := mc.synced[i].Value - if entry.ProxyPort == unrealizedRedirectPort { - var exists bool - proxyID := ProxyIDFromKey(uint16(p.PolicyOwner.GetID()), key, entry.Listener) - entry.ProxyPort, exists = redirects[proxyID] - if !exists { - log.WithFields(logrus.Fields{ - logfields.PolicyKey: key, - logfields.PolicyEntry: entry, - }).Warn("consumeMapChanges: Skipping entry for unrealized redirect") - continue - } - } + key := mc.synced[i].Key + entry := mc.synced[i].Value - // insert but do not allow non-redirect entries to overwrite a redirect entry, - // nor allow non-deny entries to overwrite deny entries. - // Collect the incremental changes to the overall state in 'mc.adds' and 'mc.deletes'. - p.policyMapState.denyPreferredInsertWithChanges(key, entry, features, changes) + if mc.synced[i].Add { + // Insert the key to and collect the incremental changes to the overall + // state in 'changes' + p.policyMapState.insertWithChanges(key, entry, features, changes) } else { - // Delete the contribution of this cs to the key and collect incremental changes - for cs := range mc.synced[i].Value.owners { // get the sole selector - p.policyMapState.deleteKeyWithChanges(mc.synced[i].Key, cs, changes) - } + // Delete the contribution of this cs to the key and collect incremental + // changes + cs, _ := entry.owners.Get() // get the sole selector + p.policyMapState.deleteKeyWithChanges(key, cs, changes) } } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/repository.go b/vendor/github.com/cilium/cilium/pkg/policy/repository.go index a1e3aac373..11018fd7cb 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/repository.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/repository.go @@ -13,6 +13,7 @@ import ( "sync/atomic" cilium "github.com/cilium/proxy/go/cilium/api" + "k8s.io/apimachinery/pkg/util/sets" "github.com/cilium/cilium/api/v1/models" "github.com/cilium/cilium/pkg/crypto/certificatemanager" @@ -106,14 +107,46 @@ func (p *policyContext) SetDeny(deny bool) bool { return oldDeny } +// RepositoryLock exposes methods to protect the whole policy tree. +type RepositoryLock interface { + Lock() + Unlock() + RLock() + RUnlock() +} + +type PolicyRepository interface { + RepositoryLock + + AddListLocked(rules api.Rules) (ruleSlice, uint64) + BumpRevision() uint64 + DeleteByLabelsLocked(lbls labels.LabelArray) (ruleSlice, uint64, int) + DeleteByResourceLocked(rid ipcachetypes.ResourceID) (ruleSlice, uint64) + GetAuthTypes(localID identity.NumericIdentity, remoteID identity.NumericIdentity) AuthTypes + GetEnvoyHTTPRules(l7Rules *api.L7Rules, ns string) (*cilium.HttpNetworkPolicyRules, bool) + GetPolicyCache() *PolicyCache + GetRevision() uint64 + GetRulesList() *models.Policy + GetSelectorCache() *SelectorCache + GetRepositoryChangeQueue() *eventqueue.EventQueue + GetRuleReactionQueue() *eventqueue.EventQueue + Iterate(f func(rule *api.Rule)) + Release(rs ruleSlice) + ReplaceByResourceLocked(rules api.Rules, resource ipcachetypes.ResourceID) (newRules ruleSlice, oldRules ruleSlice, revision uint64) + SearchRLocked(lbls labels.LabelArray) api.Rules + SetEnvoyRulesFunc(f func(certificatemanager.SecretManager, *api.L7Rules, string) (*cilium.HttpNetworkPolicyRules, bool)) + Start() +} + // Repository is a list of policy rules which in combination form the security // policy. 
A policy repository can be type Repository struct { - // Mutex protects the whole policy tree - Mutex lock.RWMutex + // mutex protects the whole policy tree + mutex lock.RWMutex - rules map[ruleKey]*rule - rulesByResource map[ipcachetypes.ResourceID]map[ruleKey]*rule + rules map[ruleKey]*rule + rulesByNamespace map[string]sets.Set[ruleKey] + rulesByResource map[ipcachetypes.ResourceID]map[ruleKey]*rule // We will need a way to synthesize a rule key for rules without a resource; // these are - in practice - very rare, as they only come from the local API, @@ -125,15 +158,15 @@ type Repository struct { // Always positive (>0). revision atomic.Uint64 - // RepositoryChangeQueue is a queue which serializes changes to the policy + // repositoryChangeQueue is a queue which serializes changes to the policy // repository. - RepositoryChangeQueue *eventqueue.EventQueue + repositoryChangeQueue *eventqueue.EventQueue - // RuleReactionQueue is a queue which serializes the resultant events that + // ruleReactionQueue is a queue which serializes the resultant events that // need to occur after updating the state of the policy repository. This // can include queueing endpoint regenerations, policy revision increments // for endpoints, etc. - RuleReactionQueue *eventqueue.EventQueue + ruleReactionQueue *eventqueue.EventQueue // SelectorCache tracks the selectors used in the policies // resolved from the repository. @@ -148,11 +181,39 @@ type Repository struct { getEnvoyHTTPRules func(certificatemanager.SecretManager, *api.L7Rules, string) (*cilium.HttpNetworkPolicyRules, bool) } +// Lock acquiers the lock of the whole policy tree. +func (p *Repository) Lock() { + p.mutex.Lock() +} + +// Unlock releases the lock of the whole policy tree. +func (p *Repository) Unlock() { + p.mutex.Unlock() +} + +// RLock acquiers the read lock of the whole policy tree. +func (p *Repository) RLock() { + p.mutex.RLock() +} + +// RUnlock releases the read lock of the whole policy tree. 
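With the repository mutex now unexported, callers are expected to lock through these methods (or the RepositoryLock/PolicyRepository interfaces) and then use the *Locked variants while the lock is held. A rough sketch of that calling pattern; addRules is an invented helper and the import paths are assumed, so treat it as an illustration rather than code from this patch:

    package example

    import (
        "github.com/cilium/cilium/pkg/policy"
        "github.com/cilium/cilium/pkg/policy/api"
    )

    // addRules shows the intended pattern: take the tree lock via the interface,
    // then call the *Locked method while the lock is held.
    func addRules(repo policy.PolicyRepository, rules api.Rules) uint64 {
        repo.Lock()
        defer repo.Unlock()

        _, rev := repo.AddListLocked(rules)
        return rev
    }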
+func (p *Repository) RUnlock() { + p.mutex.RUnlock() +} + // GetSelectorCache() returns the selector cache used by the Repository func (p *Repository) GetSelectorCache() *SelectorCache { return p.selectorCache } +func (p *Repository) GetRepositoryChangeQueue() *eventqueue.EventQueue { + return p.repositoryChangeQueue +} + +func (p *Repository) GetRuleReactionQueue() *eventqueue.EventQueue { + return p.ruleReactionQueue +} + // GetAuthTypes returns the AuthTypes required by the policy between the localID and remoteID func (p *Repository) GetAuthTypes(localID, remoteID identity.NumericIdentity) AuthTypes { return p.policyCache.GetAuthTypes(localID, remoteID) @@ -180,7 +241,7 @@ func NewPolicyRepository( initialIDs identity.IdentityMap, certManager certificatemanager.CertificateManager, secretManager certificatemanager.SecretManager, - idmgr *identitymanager.IdentityManager, + idmgr identitymanager.IDManager, ) *Repository { repo := NewStoppedPolicyRepository(initialIDs, certManager, secretManager, idmgr) repo.Start() @@ -196,15 +257,16 @@ func NewStoppedPolicyRepository( initialIDs identity.IdentityMap, certManager certificatemanager.CertificateManager, secretManager certificatemanager.SecretManager, - idmgr *identitymanager.IdentityManager, + idmgr identitymanager.IDManager, ) *Repository { selectorCache := NewSelectorCache(initialIDs) repo := &Repository{ - rules: make(map[ruleKey]*rule), - rulesByResource: make(map[ipcachetypes.ResourceID]map[ruleKey]*rule), - selectorCache: selectorCache, - certManager: certManager, - secretManager: secretManager, + rules: make(map[ruleKey]*rule), + rulesByNamespace: make(map[string]sets.Set[ruleKey]), + rulesByResource: make(map[ipcachetypes.ResourceID]map[ruleKey]*rule), + selectorCache: selectorCache, + certManager: certManager, + secretManager: secretManager, } repo.revision.Store(1) repo.policyCache = NewPolicyCache(repo, idmgr) @@ -253,10 +315,10 @@ func (state *traceState) trace(rules int, ctx *SearchContext) { // // Must only be called if using [NewStoppedPolicyRepository] func (p *Repository) Start() { - p.RepositoryChangeQueue = eventqueue.NewEventQueueBuffered("repository-change-queue", option.Config.PolicyQueueSize) - p.RuleReactionQueue = eventqueue.NewEventQueueBuffered("repository-reaction-queue", option.Config.PolicyQueueSize) - p.RepositoryChangeQueue.Run() - p.RuleReactionQueue.Run() + p.repositoryChangeQueue = eventqueue.NewEventQueueBuffered("repository-change-queue", option.Config.PolicyQueueSize) + p.ruleReactionQueue = eventqueue.NewEventQueueBuffered("repository-reaction-queue", option.Config.PolicyQueueSize) + p.repositoryChangeQueue.Run() + p.ruleReactionQueue.Run() } // ResolveL4IngressPolicy resolves the L4 ingress policy for a set of endpoints @@ -451,6 +513,10 @@ func (p *Repository) ReplaceByResourceLocked(rules api.Rules, resource ipcachety func (p *Repository) insert(r *rule) { p.rules[r.key] = r + if _, ok := p.rulesByNamespace[r.key.resource.Namespace()]; !ok { + p.rulesByNamespace[r.key.resource.Namespace()] = sets.New[ruleKey]() + } + p.rulesByNamespace[r.key.resource.Namespace()].Insert(r.key) rid := r.key.resource if len(rid) > 0 { if p.rulesByResource[rid] == nil { @@ -467,6 +533,10 @@ func (p *Repository) del(key ruleKey) { return } delete(p.rules, key) + p.rulesByNamespace[key.resource.Namespace()].Delete(key) + if len(p.rulesByNamespace[key.resource.Namespace()]) == 0 { + delete(p.rulesByNamespace, key.resource.Namespace()) + } rid := key.resource if len(rid) > 0 && p.rulesByResource[rid] != nil { @@ -509,16 
+579,16 @@ func (p *Repository) MustAddList(rules api.Rules) (ruleSlice, uint64) { panic(err) } } - p.Mutex.Lock() - defer p.Mutex.Unlock() + p.mutex.Lock() + defer p.mutex.Unlock() return p.AddListLocked(rules) } // Iterate iterates the policy repository, calling f for each rule. It is safe // to execute Iterate concurrently. func (p *Repository) Iterate(f func(rule *api.Rule)) { - p.Mutex.RWMutex.Lock() - defer p.Mutex.RWMutex.Unlock() + p.mutex.RWMutex.Lock() + defer p.mutex.RWMutex.Unlock() for _, r := range p.rules { f(&r.Rule) } @@ -584,8 +654,8 @@ func (p *Repository) DeleteByResourceLocked(rid ipcachetypes.ResourceID) (ruleSl // DeleteByLabels deletes all rules in the policy repository which contain the // specified labels func (p *Repository) DeleteByLabels(lbls labels.LabelArray) (uint64, int) { - p.Mutex.Lock() - defer p.Mutex.Unlock() + p.mutex.Lock() + defer p.mutex.Unlock() _, rev, numDeleted := p.DeleteByLabelsLocked(lbls) return rev, numDeleted } @@ -600,24 +670,10 @@ func JSONMarshalRules(rules api.Rules) string { return string(b) } -// GetJSON returns all rules of the policy repository as string in JSON -// representation -func (p *Repository) GetJSON() string { - p.Mutex.RLock() - defer p.Mutex.RUnlock() - - result := api.Rules{} - for _, r := range p.rules { - result = append(result, &r.Rule) - } - - return JSONMarshalRules(result) -} - // GetRulesMatching returns whether any of the rules in a repository contain a // rule with labels matching the labels in the provided LabelArray. // -// Must be called with p.Mutex held +// Must be called with p.mutex held func (p *Repository) GetRulesMatching(lbls labels.LabelArray) (ingressMatch bool, egressMatch bool) { ingressMatch = false egressMatch = false @@ -645,27 +701,11 @@ func (p *Repository) GetRulesMatching(lbls labels.LabelArray) (ingressMatch bool return } -// NumRules returns the amount of rules in the policy repository. -// -// Must be called with p.Mutex held -func (p *Repository) NumRules() int { - return len(p.rules) -} - // GetRevision returns the revision of the policy repository func (p *Repository) GetRevision() uint64 { return p.revision.Load() } -// Empty returns 'true' if repository has no rules, 'false' otherwise. 
-// -// Must be called without p.Mutex held -func (p *Repository) Empty() bool { - p.Mutex.Lock() - defer p.Mutex.Unlock() - return p.NumRules() == 0 -} - // BumpRevision allows forcing policy regeneration func (p *Repository) BumpRevision() uint64 { metrics.PolicyRevision.Inc() @@ -674,8 +714,8 @@ func (p *Repository) BumpRevision() uint64 { // GetRulesList returns the current policy func (p *Repository) GetRulesList() *models.Policy { - p.Mutex.RLock() - defer p.Mutex.RUnlock() + p.mutex.RLock() + defer p.mutex.RUnlock() lbls := labels.ParseSelectLabelArrayFromArray([]string{}) ruleList := p.SearchRLocked(lbls) @@ -777,11 +817,23 @@ func (p *Repository) computePolicyEnforcementAndRules(securityIdentity *identity } matchingRules = []*rule{} - for _, r := range p.rules { + // Match cluster-wide rules + for rKey := range p.rulesByNamespace[""] { + r := p.rules[rKey] if r.matchesSubject(securityIdentity) { matchingRules = append(matchingRules, r) } } + // Match namespace-specific rules + namespace := lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel) + if namespace != "" { + for rKey := range p.rulesByNamespace[namespace] { + r := p.rules[rKey] + if r.matchesSubject(securityIdentity) { + matchingRules = append(matchingRules, r) + } + } + } // If policy enforcement is enabled for the daemon, then it has to be // enabled for the endpoint. diff --git a/vendor/github.com/cilium/cilium/pkg/policy/resolve.go b/vendor/github.com/cilium/cilium/pkg/policy/resolve.go index 5a357534f8..75b15ec6cc 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/resolve.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/resolve.go @@ -4,9 +4,14 @@ package policy import ( + "errors" + "fmt" + "iter" + "github.com/sirupsen/logrus" "github.com/cilium/cilium/pkg/container/versioned" + "github.com/cilium/cilium/pkg/labels" "github.com/cilium/cilium/pkg/logging/logfields" "github.com/cilium/cilium/pkg/u8proto" ) @@ -59,21 +64,35 @@ type EndpointPolicy struct { // Proxy port 0 indicates no proxy redirection. // All fields within the Key and the proxy port must be in host byte-order. // Must only be accessed with PolicyOwner (aka Endpoint) lock taken. - policyMapState MapState + policyMapState *mapState // policyMapChanges collects pending changes to the PolicyMapState policyMapChanges MapChanges // PolicyOwner describes any type which consumes this EndpointPolicy object. PolicyOwner PolicyOwner + + // Redirects contains the proxy ports needed for this EndpointPolicy. + // If any redirects are missing a new policy will be computed to rectify it, so this is + // constant for the lifetime of this EndpointPolicy. + Redirects map[string]uint16 +} + +// LookupRedirectPort returns the redirect L4 proxy port for the given input parameters. +// Returns 0 if not found or the filter doesn't require a redirect. +// Returns an error if the redirect port can not be found. +// This is called when accumulating incremental map changes, endpoint lock must not be taken. +func (p *EndpointPolicy) LookupRedirectPort(ingress bool, protocol string, port uint16, listener string) (uint16, error) { + proxyID := ProxyID(uint16(p.PolicyOwner.GetID()), ingress, protocol, port, listener) + if proxyPort, exists := p.Redirects[proxyID]; exists { + return proxyPort, nil + } + return 0, fmt.Errorf("Proxy port for redirect %q not found", proxyID) } // PolicyOwner is anything which consumes a EndpointPolicy. 
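LookupRedirectPort now resolves proxy ports from the EndpointPolicy's own Redirects map, keyed by the proxy ID, instead of asking the PolicyOwner. A minimal sketch of a caller; resolveProxyPort and the ingress/TCP/80 inputs are made-up examples, not values taken from this patch:

    package example

    import "github.com/cilium/cilium/pkg/policy"

    // resolveProxyPort is a hypothetical caller of LookupRedirectPort. It returns
    // 0 when no realized redirect exists for the given direction, protocol, port
    // and listener, mirroring how a missing redirect triggers a recomputation.
    func resolveProxyPort(p *policy.EndpointPolicy) uint16 {
        proxyPort, err := p.LookupRedirectPort(true, "TCP", 80, "")
        if err != nil {
            // Not found in p.Redirects; a new policy will be computed to rectify it.
            return 0
        }
        return proxyPort
    }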
type PolicyOwner interface { GetID() uint64 - LookupRedirectPort(ingress bool, protocol string, port uint16, listener string) (uint16, error) - GetRealizedRedirects() map[string]uint16 - HasBPFPolicyMap() bool GetNamedPort(ingress bool, name string, proto u8proto.U8proto) uint16 PolicyDebug(fields logrus.Fields, msg string) } @@ -113,7 +132,7 @@ func (p *selectorPolicy) Detach() { // Called without holding the Selector cache or Repository locks. // PolicyOwner (aka Endpoint) is also unlocked during this call, // but the Endpoint's build mutex is held. -func (p *selectorPolicy) DistillPolicy(policyOwner PolicyOwner, isHost bool) *EndpointPolicy { +func (p *selectorPolicy) DistillPolicy(policyOwner PolicyOwner, redirects map[string]uint16, isHost bool) *EndpointPolicy { var calculatedPolicy *EndpointPolicy // EndpointPolicy is initialized while 'GetCurrentVersionHandleFunc' keeps the selector @@ -132,14 +151,15 @@ func (p *selectorPolicy) DistillPolicy(policyOwner PolicyOwner, isHost bool) *En calculatedPolicy = &EndpointPolicy{ selectorPolicy: p, VersionHandle: version, - policyMapState: NewMapState(), + policyMapState: newMapState(), policyMapChanges: MapChanges{ firstVersion: version.Version(), }, PolicyOwner: policyOwner, + Redirects: redirects, } // Register the new EndpointPolicy as a receiver of incremental - // updates before selector cache lock is released by 'GetHandle'. + // updates before selector cache lock is released by 'GetCurrentVersionHandleFunc'. p.insertUser(calculatedPolicy) }) @@ -168,23 +188,6 @@ func (p *EndpointPolicy) Ready() (err error) { return err } -// GetPolicyMap gets the policy map state as the interface -// MapState -func (p *EndpointPolicy) GetPolicyMap() MapState { - return p.policyMapState -} - -// SetPolicyMap sets the policy map state as the interface -// MapState. If the main argument is nil, then this method -// will initialize a new MapState object for the caller. -func (p *EndpointPolicy) SetPolicyMap(ms MapState) { - if ms == nil { - p.policyMapState = NewMapState() - return - } - p.policyMapState = ms -} - // Detach removes EndpointPolicy references from selectorPolicy // to allow the EndpointPolicy to be GC'd. // PolicyOwner (aka Endpoint) is also locked during this call. @@ -201,35 +204,115 @@ func (p *EndpointPolicy) Detach() { p.policyMapChanges.detach() } -// NewMapStateWithInsert returns a new MapState and an insert function that can be used to populate -// it. We keep general insert functions private so that the caller can only insert to this specific -// map. -func NewMapStateWithInsert() (MapState, func(k Key, e MapStateEntry)) { - currentMap := NewMapState() +func (p *EndpointPolicy) Len() int { + return p.policyMapState.Len() +} - return currentMap, func(k Key, e MapStateEntry) { - currentMap.insert(k, e) +func (p *EndpointPolicy) Get(key Key) (MapStateEntry, bool) { + return p.policyMapState.Get(key) +} + +var errMissingKey = errors.New("Key not found") + +// GetRuleLabels returns the list of labels of the rules that contributed +// to the entry at this key. +// The returned LabelArrayList is shallow-copied and therefore must not be mutated. 
+func (p *EndpointPolicy) GetRuleLabels(k Key) (labels.LabelArrayList, error) { + entry, ok := p.policyMapState.get(k) + if !ok { + return nil, errMissingKey } + return entry.GetRuleLabels(), nil } -func (p *EndpointPolicy) InsertMapState(key Key, entry MapStateEntry) { - // SelectorCache used as Identities interface which only has GetPrefix() that needs no lock - p.policyMapState.insert(key, entry) +func (p *EndpointPolicy) Entries() iter.Seq2[Key, MapStateEntry] { + return func(yield func(Key, MapStateEntry) bool) { + p.policyMapState.ForEach(yield) + } } -func (p *EndpointPolicy) DeleteMapState(key Key) { - // SelectorCache used as Identities interface which only has GetPrefix() that needs no lock - p.policyMapState.delete(key) +func (p *EndpointPolicy) Equals(other MapStateMap) bool { + return p.policyMapState.Equals(other) } -func (p *EndpointPolicy) RevertChanges(changes ChangeState) { - // SelectorCache used as Identities interface which only has GetPrefix() that needs no lock - p.policyMapState.revertChanges(changes) +func (p *EndpointPolicy) Diff(expected MapStateMap) string { + return p.policyMapState.Diff(expected) +} + +func (p *EndpointPolicy) Empty() bool { + return p.policyMapState.Empty() +} + +// Updated returns an iterator for all key/entry pairs in 'p' that are either new or updated +// compared to the entries in 'realized'. +// Here 'realized' is another EndpointPolicy. +// This can be used to figure out which entries need to be added to or updated in 'realised'. +func (p *EndpointPolicy) Updated(realized *EndpointPolicy) iter.Seq2[Key, MapStateEntry] { + return func(yield func(Key, MapStateEntry) bool) { + p.policyMapState.ForEach(func(key Key, entry MapStateEntry) bool { + if oldEntry, ok := realized.policyMapState.Get(key); !ok || oldEntry != entry { + if !yield(key, entry) { + return false + } + } + return true + }) + } +} + +// Missing returns an iterator for all key/entry pairs in 'realized' that missing from 'p'. +// Here 'realized' is another EndpointPolicy. +// This can be used to figure out which entries in 'realised' need to be deleted. +func (p *EndpointPolicy) Missing(realized *EndpointPolicy) iter.Seq2[Key, MapStateEntry] { + return func(yield func(Key, MapStateEntry) bool) { + realized.policyMapState.ForEach(func(key Key, entry MapStateEntry) bool { + // If key that is in realized state is not in desired state, just remove it. + if _, ok := p.policyMapState.Get(key); !ok { + if !yield(key, entry) { + return false + } + } + return true + }) + } +} + +// UpdatedMap returns an iterator for all key/entry pairs in 'p' that are either new or updated +// compared to the entries in 'realized'. +// Here 'realized' is MapStateMap. +// This can be used to figure out which entries need to be added to or updated in 'realised'. +func (p *EndpointPolicy) UpdatedMap(realized MapStateMap) iter.Seq2[Key, MapStateEntry] { + return func(yield func(Key, MapStateEntry) bool) { + p.policyMapState.ForEach(func(key Key, entry MapStateEntry) bool { + if oldEntry, ok := realized[key]; !ok || oldEntry != entry { + if !yield(key, entry) { + return false + } + } + return true + }) + } } -func (p *EndpointPolicy) AddVisibilityKeys(e PolicyOwner, redirectPort uint16, visMeta *VisibilityMetadata, changes ChangeState) { +// Missing returns an iterator for all key/entry pairs in 'realized' that missing from 'p'. +// Here 'realized' is MapStateMap. +// This can be used to figure out which entries in 'realised' need to be deleted. 
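Updated, Missing, UpdatedMap and MissingMap are range-over-func iterators (iter.Seq2, Go 1.23+), so reconciling desired state against a realized MapStateMap becomes two plain range loops. A rough sketch; syncPolicyMap, applyAdd and applyDelete are invented names standing in for whatever the caller does with each entry:

    package example

    import "github.com/cilium/cilium/pkg/policy"

    // syncPolicyMap is a hypothetical reconciler: push new or changed entries
    // first, then delete entries that are no longer desired.
    func syncPolicyMap(
        desired *policy.EndpointPolicy,
        realized policy.MapStateMap,
        applyAdd func(policy.Key, policy.MapStateEntry),
        applyDelete func(policy.Key),
    ) {
        for k, v := range desired.UpdatedMap(realized) {
            applyAdd(k, v) // new or updated compared to the realized map
        }
        for k := range desired.MissingMap(realized) {
            applyDelete(k) // present in the realized map but not desired anymore
        }
    }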
+func (p *EndpointPolicy) MissingMap(realized MapStateMap) iter.Seq2[Key, MapStateEntry] { + return func(yield func(Key, MapStateEntry) bool) { + for k, v := range realized { + // If key that is in realized state is not in desired state, just remove it. + if _, ok := p.policyMapState.Get(k); !ok { + if !yield(k, v) { + break + } + } + } + } +} + +func (p *EndpointPolicy) RevertChanges(changes ChangeState) { // SelectorCache used as Identities interface which only has GetPrefix() that needs no lock - p.policyMapState.addVisibilityKeys(e, redirectPort, visMeta, changes) + p.policyMapState.revertChanges(changes) } // toMapState transforms the EndpointPolicy.L4Policy into @@ -251,43 +334,33 @@ func (p *EndpointPolicy) toMapState() { // but the Endpoint's build mutex is held. func (l4policy L4DirectionPolicy) toMapState(p *EndpointPolicy) { l4policy.PortRules.ForEach(func(l4 *L4Filter) bool { - l4.toMapState(p, l4policy.features, p.PolicyOwner.GetRealizedRedirects(), ChangeState{}) + l4.toMapState(p, l4policy.features, ChangeState{}) return true }) } -// createRedirectsFunc returns 'nil' if map changes should not be applied immemdiately, -// otherwise the returned map is to be used to find redirect ports for map updates. -type createRedirectsFunc func(*L4Filter) map[string]uint16 - -// UpdateRedirects updates redirects in the EndpointPolicy's PolicyMapState by using the provided -// function to create redirects. Changes to 'p.PolicyMapState' are collected in -// 'adds' and 'updated' so that they can be reverted when needed. -func (p *EndpointPolicy) UpdateRedirects(ingress bool, createRedirects createRedirectsFunc, changes ChangeState) { - l4policy := &p.L4Policy.Ingress - if ingress { - l4policy = &p.L4Policy.Egress +// RedirectFilters returns an iterator for each L4Filter with a redirect in the policy. +func (p *selectorPolicy) RedirectFilters() iter.Seq2[*L4Filter, *PerSelectorPolicy] { + return func(yield func(*L4Filter, *PerSelectorPolicy) bool) { + if p.L4Policy.Ingress.forEachRedirectFilter(yield) { + p.L4Policy.Egress.forEachRedirectFilter(yield) + } } - - l4policy.updateRedirects(p, createRedirects, changes) } -func (l4policy L4DirectionPolicy) updateRedirects(p *EndpointPolicy, createRedirects createRedirectsFunc, changes ChangeState) { +func (l4policy L4DirectionPolicy) forEachRedirectFilter(yield func(*L4Filter, *PerSelectorPolicy) bool) bool { + ok := true l4policy.PortRules.ForEach(func(l4 *L4Filter) bool { if l4.IsRedirect() { - // Check if we are denying this specific L4 first regardless the L3, if there are any deny policies - if l4policy.features.contains(denyRules) && p.policyMapState.deniesL4(p.PolicyOwner, l4) { - return true - } - - redirects := createRedirects(l4) - if redirects != nil { - // Set the proxy port in the policy map. - l4.toMapState(p, l4policy.features, redirects, changes) + for _, ps := range l4.PerSelectorPolicies { + if ps != nil && ps.IsRedirect() { + ok = yield(l4, ps) + } } } - return true + return ok }) + return ok } // ConsumeMapChanges transfers the changes from MapChanges to the caller. 
@@ -305,8 +378,8 @@ func (p *EndpointPolicy) ConsumeMapChanges() (closer func(), changes ChangeState closer = func() {} if version.IsValid() { var msg string - // update the version handle in p.VersionHandle so that any follow-on processing acts on the - // basis of the new version + // update the version handle in p.VersionHandle so that any follow-on processing + // acts on the basis of the new version if p.VersionHandle.IsValid() { p.VersionHandle.Close() msg = "ConsumeMapChanges: updated valid version" @@ -329,9 +402,9 @@ func (p *EndpointPolicy) ConsumeMapChanges() (closer func(), changes ChangeState } // NewEndpointPolicy returns an empty EndpointPolicy stub. -func NewEndpointPolicy(repo *Repository) *EndpointPolicy { +func NewEndpointPolicy(repo PolicyRepository) *EndpointPolicy { return &EndpointPolicy{ selectorPolicy: newSelectorPolicy(repo.GetSelectorCache()), - policyMapState: NewMapState(), + policyMapState: newMapState(), } } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/rule.go b/vendor/github.com/cilium/cilium/pkg/policy/rule.go index 94b79cfe8e..cb83005882 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/rule.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/rule.go @@ -335,7 +335,7 @@ func mergeIngressPortProto(policyCtx PolicyContext, ctx *SearchContext, endpoint return 0, err } - err = addL4Filter(policyCtx, ctx, resMap, p, proto, filterToMerge, ruleLabels) + err = addL4Filter(policyCtx, ctx, resMap, p, proto, filterToMerge) if err != nil { return 0, err } @@ -762,7 +762,7 @@ func mergeEgressPortProto(policyCtx PolicyContext, ctx *SearchContext, endpoints return 0, err } - err = addL4Filter(policyCtx, ctx, resMap, p, proto, filterToMerge, ruleLabels) + err = addL4Filter(policyCtx, ctx, resMap, p, proto, filterToMerge) if err != nil { return 0, err } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/selectorcache.go b/vendor/github.com/cilium/cilium/pkg/policy/selectorcache.go index 758196f5e2..946edafa60 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/selectorcache.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/selectorcache.go @@ -536,6 +536,8 @@ func (sc *SelectorCache) UpdateIdentities(added, deleted identity.IdentityMap, w if updated { // Launch a waiter that holds the new version as long as needed for users to have grabbed it + sc.queueNotifiedUsersCommit(txn, wg) + go func(version *versioned.VersionHandle) { wg.Wait() log.WithFields(logrus.Fields{ @@ -544,7 +546,6 @@ func (sc *SelectorCache) UpdateIdentities(added, deleted identity.IdentityMap, w version.Close() }(txn.GetVersionHandle()) - sc.queueNotifiedUsersCommit(txn, wg) txn.Commit() } } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/trigger.go b/vendor/github.com/cilium/cilium/pkg/policy/trigger.go index 21615c1c6c..0b84674193 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/trigger.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/trigger.go @@ -28,7 +28,7 @@ func (u *Updater) TriggerPolicyUpdates(force bool, reason string) { // NewUpdater returns a new Updater instance to handle triggering policy // updates ready for use. 
-func NewUpdater(r *Repository, regen regenerator) *Updater { +func NewUpdater(r PolicyRepository, regen regenerator) *Updater { t, err := trigger.NewTrigger(trigger.Parameters{ Name: "policy_update", MetricsObserver: &TriggerMetrics{}, @@ -62,7 +62,7 @@ func NewUpdater(r *Repository, regen regenerator) *Updater { type Updater struct { *trigger.Trigger - repo *Repository + repo PolicyRepository } type regenerator interface { diff --git a/vendor/github.com/cilium/cilium/pkg/policy/types/types.go b/vendor/github.com/cilium/cilium/pkg/policy/types/types.go index c033761666..eae47b9420 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/types/types.go @@ -132,6 +132,8 @@ func (k Key) WithIdentity(nid identity.NumericIdentity) Key { // TrafficDirection() returns the direction of the Key, 0 == ingress, 1 == egress func (k LPMKey) TrafficDirection() trafficdirection.TrafficDirection { + // Note that 0 and 1 are the only possible return values, the shift below reduces the byte + // to a single bit. return trafficdirection.TrafficDirection(k.bits >> directionBitShift) } @@ -140,6 +142,10 @@ func (k LPMKey) PortPrefixLen() uint8 { return k.bits & ^directionBitMask } +func (k LPMKey) HasPortWildcard() bool { + return k.bits & ^directionBitMask < 16 +} + // String returns a string representation of the Key func (k Key) String() string { dPort := strconv.FormatUint(uint64(k.DestPort), 10) diff --git a/vendor/github.com/cilium/cilium/pkg/policy/visibility.go b/vendor/github.com/cilium/cilium/pkg/policy/visibility.go deleted file mode 100644 index 8a8a72c911..0000000000 --- a/vendor/github.com/cilium/cilium/pkg/policy/visibility.go +++ /dev/null @@ -1,235 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package policy - -import ( - "fmt" - "regexp" - "strconv" - "strings" - - ciliumio "github.com/cilium/cilium/pkg/k8s/apis/cilium.io" - "github.com/cilium/cilium/pkg/labels" - "github.com/cilium/cilium/pkg/policy/api" - "github.com/cilium/cilium/pkg/u8proto" -) - -var ( - singleAnnotationRegex = "<(Ingress|Egress)/([1-9][0-9]{1,5})/(TCP|UDP|SCTP|ANY)/([A-Za-z]{3,32})>" - annotationRegex = regexp.MustCompile(fmt.Sprintf(`^((%s)(,(%s))*)$`, singleAnnotationRegex, singleAnnotationRegex)) -) - -func validateL7ProtocolWithDirection(dir string, proto L7ParserType) error { - switch proto { - case ParserTypeHTTP: - return nil - case ParserTypeDNS: - if dir == "Egress" { - return nil - } - case ParserTypeKafka: - return nil - default: - return fmt.Errorf("unsupported parser type %s", proto) - - } - return fmt.Errorf("%s not allowed with direction %s", proto, dir) -} - -// NewVisibilityPolicy generates the VisibilityPolicy that is encoded in the -// annotation parameter. -// Returns an error: -// - if the annotation does not correspond to the expected -// format for a visibility annotation. -// - if there is a conflict between the state encoded in the annotation (e.g., -// different L7 protocols for the same L4 port / protocol / traffic direction. -func NewVisibilityPolicy(anno, namespace, pod string) (*VisibilityPolicy, error) { - if !annotationRegex.MatchString(anno) { - return nil, fmt.Errorf("annotation for proxy visibility did not match expected format %s", annotationRegex.String()) - } - - nvp := &VisibilityPolicy{ - Ingress: make(DirectionalVisibilityPolicy), - Egress: make(DirectionalVisibilityPolicy), - } - - // TODO: look into using regex groups. 
- anSplit := strings.Split(anno, ",") - for i := range anSplit { - proxyAnnoSplit := strings.Split(anSplit[i], "/") - if len(proxyAnnoSplit) != 4 { - err := fmt.Errorf("invalid number of fields (%d) in annotation", len(proxyAnnoSplit)) - return nil, err - } - // Ingress|Egress - // Don't need to validate the content itself, regex already did that. - direction := proxyAnnoSplit[0][1:] - port := proxyAnnoSplit[1] - - portInt, err := strconv.ParseUint(port, 10, 16) - if err != nil { - return nil, fmt.Errorf("unable to parse port: %w", err) - } - - // Don't need to validate, regex already did that. - l4Proto := proxyAnnoSplit[2] - u8Prot, err := u8proto.ParseProtocol(l4Proto) - if err != nil { - return nil, fmt.Errorf("invalid L4 protocol %s", l4Proto) - } - - // ANY equates to TCP and UDP in the datapath; the datapath itself does - // not support 'Any' protocol paired with a port at L4. - var protos []u8proto.U8proto - if u8Prot == u8proto.ANY { - protos = append(protos, u8proto.TCP) - protos = append(protos, u8proto.UDP) - protos = append(protos, u8proto.SCTP) - } else { - protos = append(protos, u8Prot) - } - // Remove trailing '>'. - l7Protocol := L7ParserType(strings.ToLower(proxyAnnoSplit[3][:len(proxyAnnoSplit[3])-1])) - - if err := validateL7ProtocolWithDirection(direction, l7Protocol); err != nil { - return nil, err - } - - var dvp DirectionalVisibilityPolicy - var ingress bool - if direction == "Ingress" { - dvp = nvp.Ingress - ingress = true - } else { - dvp = nvp.Egress - ingress = false - } - - for _, prot := range protos { - pp := strconv.FormatUint(portInt, 10) + "/" + prot.String() - if res, ok := dvp[pp]; ok { - if res.Parser != l7Protocol { - return nil, fmt.Errorf("duplicate annotations with different L7 protocols %s and %s for %s", res.Parser, l7Protocol, pp) - } - } - - l7Meta := generateL7AllowAllRules(l7Protocol, namespace, pod) - - dvp[pp] = &VisibilityMetadata{ - Parser: l7Protocol, - Port: uint16(portInt), - Proto: prot, - Ingress: ingress, - L7Metadata: l7Meta, - } - } - } - - return nvp, nil -} - -func generateL7AllowAllRules(parser L7ParserType, namespace, pod string) L7DataMap { - var m L7DataMap - switch parser { - case ParserTypeDNS: - m = L7DataMap{} - // Create an entry to explicitly allow all at L7 for DNS. - emptyL3Selector := &identitySelector{source: &labelIdentitySelector{selector: api.WildcardEndpointSelector}, key: wildcardSelectorKey} - emptyL3Selector.metadataLbls = labels.LabelArray{ - labels.NewLabel(ciliumio.PolicyLabelDerivedFrom, "PodVisibilityAnnotation", labels.LabelSourceK8s), - } - if namespace != "" { - emptyL3Selector.metadataLbls = append(emptyL3Selector.metadataLbls, labels.NewLabel(ciliumio.PodNamespaceLabel, namespace, labels.LabelSourceK8s)) - } - if pod != "" { - emptyL3Selector.metadataLbls = append(emptyL3Selector.metadataLbls, labels.NewLabel(ciliumio.PodNameLabel, pod, labels.LabelSourceK8s)) - } - - m[emptyL3Selector] = &PerSelectorPolicy{ - L7Rules: api.L7Rules{ - DNS: []api.PortRuleDNS{ - { - MatchPattern: "*", - }, - }, - }, - } - } - return m -} - -// VisibilityMetadata encodes state about what type of traffic should be -// redirected to an L7Proxy. Implements the ProxyPolicy interface. -// TODO: an L4Filter could be composed of this type. -type VisibilityMetadata struct { - // Parser represents the proxy to which traffic should be redirected. - Parser L7ParserType - - // Port, in tandem with Proto, signifies which L4 port for which traffic - // should be redirected. 
- Port uint16 - - // Proto, in tandem with port, signifies which L4 protocol for which traffic - // should be redirected. - Proto u8proto.U8proto - - // Ingress specifies whether ingress traffic at the given L4 port / protocol - // should be redirected to the proxy. - Ingress bool - - // L7Metadata encodes optional information what is allowed at L7 for - // visibility. Some specific protocol parsers do not need this set for - // allowing of traffic (e.g., HTTP), but some do (e.g., DNS). - L7Metadata L7DataMap -} - -// DirectionalVisibilityPolicy is a mapping of VisibilityMetadata keyed by -// L4 Port / L4 Protocol (e.g., 80/TCP) for a given traffic direction (e.g., -// ingress or egress). This encodes at which L4 Port / L4 Protocol traffic -// should be redirected to a given L7 proxy. An empty instance of this type -// indicates that no traffic should be redirected. -type DirectionalVisibilityPolicy map[string]*VisibilityMetadata - -// VisibilityPolicy represents for both ingress and egress which types of -// traffic should be redirected to a given L7 proxy. -type VisibilityPolicy struct { - Ingress DirectionalVisibilityPolicy - Egress DirectionalVisibilityPolicy - Error error -} - -// CopyL7RulesPerEndpoint returns a shallow copy of the L7Metadata of the -// L4Filter. -func (v *VisibilityMetadata) CopyL7RulesPerEndpoint() L7DataMap { - if v.L7Metadata != nil { - return v.L7Metadata.ShallowCopy() - } - return nil -} - -// GetL7Parser returns the L7ParserType for this VisibilityMetadata. -func (v *VisibilityMetadata) GetL7Parser() L7ParserType { - return v.Parser -} - -// GetIngress returns whether the VisibilityMetadata applies at ingress or -// egress. -func (v *VisibilityMetadata) GetIngress() bool { - return v.Ingress -} - -// GetPort returns at which port the VisibilityMetadata applies. -func (v *VisibilityMetadata) GetPort() uint16 { - return v.Port -} - -// GetProtocol returns the protocol where the VisibilityMetadata applies. -func (v *VisibilityMetadata) GetProtocol() u8proto.U8proto { - return v.Proto -} - -// GetListener returns the optional listener name. -func (l4 *VisibilityMetadata) GetListener() string { - return "" -} diff --git a/vendor/github.com/cilium/cilium/pkg/resiliency/error.go b/vendor/github.com/cilium/cilium/pkg/resiliency/error.go new file mode 100644 index 0000000000..cd348fc00e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/resiliency/error.go @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package resiliency + +// retryableErr tracks errors that could be retried. +type retryableErr struct { + error +} + +// Retryable returns a new instance. +func Retryable(e error) retryableErr { + return retryableErr{error: e} +} diff --git a/vendor/github.com/cilium/cilium/pkg/resiliency/errorset.go b/vendor/github.com/cilium/cilium/pkg/resiliency/errorset.go new file mode 100644 index 0000000000..9ac59e72d2 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/resiliency/errorset.go @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package resiliency + +import ( + "errors" + "fmt" +) + +type tuple struct { + index int + err error +} + +// ErrorSet tracks a collection of unique errors. +type ErrorSet struct { + total, failed int + msg string + errs map[string]tuple +} + +// NewErrorSet returns a new instance. 
+func NewErrorSet(msg string, c int) *ErrorSet { + return &ErrorSet{ + msg: msg, + total: c, + errs: make(map[string]tuple), + } +} + +// Add adds one or more errors to the set. +func (e *ErrorSet) Add(errs ...error) { + for _, err := range errs { + if err == nil { + continue + } + if _, ok := e.errs[err.Error()]; ok { + continue + } + e.errs[err.Error()] = tuple{index: e.failed, err: err} + e.failed++ + } +} + +// Error returns a list of unique errors or nil. +func (e *ErrorSet) Errors() []error { + if len(e.errs) == 0 { + return nil + } + errs := make([]error, len(e.errs)+1) + errs[0] = fmt.Errorf("%s (%d/%d) failed", e.msg, e.failed, e.total) + for _, t := range e.errs { + errs[t.index+1] = t.err + } + + return errs +} + +// Error returns a new composite error or nil. +func (e *ErrorSet) Error() error { + return errors.Join(e.Errors()...) +} diff --git a/vendor/github.com/cilium/cilium/pkg/resiliency/helpers.go b/vendor/github.com/cilium/cilium/pkg/resiliency/helpers.go new file mode 100644 index 0000000000..0cd5d9cb8a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/resiliency/helpers.go @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package resiliency + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/util/wait" +) + +// RetryFunc tracks resiliency retry calls. +type RetryFunc func(ctx context.Context, retries int) (bool, error) + +// Retry retries the provided call using exponential retries given an initial duration for up to max retries count. +func Retry(ctx context.Context, duration time.Duration, maxRetries int, fn RetryFunc) error { + bo := wait.Backoff{ + Duration: duration, + Factor: 1, + Jitter: 0.1, + Steps: maxRetries, + } + + var retries int + f := func(ctx context.Context) (bool, error) { + retries++ + return fn(ctx, retries) + } + + return wait.ExponentialBackoffWithContext(ctx, bo, f) +} diff --git a/vendor/github.com/cilium/cilium/pkg/resiliency/retry.go b/vendor/github.com/cilium/cilium/pkg/resiliency/retry.go new file mode 100644 index 0000000000..a6f1bc6344 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/resiliency/retry.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package resiliency + +import ( + "errors" +) + +// IsRetryable checks if an error can be retried. +func IsRetryable(e error) bool { + return errors.As(e, new(retryableErr)) +} diff --git a/vendor/github.com/cilium/cilium/pkg/slices/slices.go b/vendor/github.com/cilium/cilium/pkg/slices/slices.go index 7cd2e860e4..9e81fb9551 100644 --- a/vendor/github.com/cilium/cilium/pkg/slices/slices.go +++ b/vendor/github.com/cilium/cilium/pkg/slices/slices.go @@ -84,24 +84,6 @@ func SortedUnique[S ~[]T, T cmp.Ordered](s S) S { return slices.Compact(s) } -// SortedUniqueFunc is like SortedUnique but allows the user to specify custom functions -// for ordering (less function) and comparing (eq function) the elements in the slice. -// This is useful in all the cases where SortedUnique cannot be used: -// - for types that do not satisfy constraints.Ordered (e.g: composite types) -// - when the user wants to customize how elements are compared (e.g: user wants to enforce reverse ordering) -func SortedUniqueFunc[S ~[]T, T any]( - s S, - less func(a, b T) int, - eq func(a, b T) bool, -) S { - if len(s) < 2 { - return s - } - - slices.SortFunc(s, less) - return slices.CompactFunc(s, eq) -} - // Diff returns a slice of elements which is the difference of a and b. 
// The returned slice keeps the elements in the same order found in the "a" slice. // Both input slices are considered as sets, that is, all elements are considered as @@ -149,3 +131,15 @@ func XorNil[T any](s1, s2 []T) bool { return s1 == nil && s2 != nil || s1 != nil && s2 == nil } + +// AllMatch returns true if pred is true for each element in s, false otherwise. +// May not evaluate on all elements if not necessary for determining the result. +// If the slice is empty then true is returned and predicate is not evaluated. +func AllMatch[T any](s []T, pred func(v T) bool) bool { + for _, v := range s { + if !pred(v) { + return false + } + } + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/trigger/trigger.go b/vendor/github.com/cilium/cilium/pkg/trigger/trigger.go index 7afae25872..48c89e9ad0 100644 --- a/vendor/github.com/cilium/cilium/pkg/trigger/trigger.go +++ b/vendor/github.com/cilium/cilium/pkg/trigger/trigger.go @@ -6,7 +6,6 @@ package trigger import ( "fmt" - "github.com/cilium/cilium/pkg/inctimer" "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/time" ) @@ -173,8 +172,9 @@ func (t *Trigger) Shutdown() { } func (t *Trigger) waiter() { - sleepTimer, sleepTimerDone := inctimer.New() - defer sleepTimerDone() + tk := time.NewTicker(t.params.sleepInterval) + defer tk.Stop() + for { // keep critical section as small as possible t.mutex.Lock() @@ -208,8 +208,7 @@ func (t *Trigger) waiter() { select { case <-t.wakeupChan: - case <-sleepTimer.After(t.params.sleepInterval): - + case <-tk.C: case <-t.closeChan: shutdownFunc := t.params.ShutdownFunc if shutdownFunc != nil { diff --git a/vendor/github.com/cilium/hive/cell/lifecycle.go b/vendor/github.com/cilium/hive/cell/lifecycle.go index cbe41eebd9..a41f75ee2c 100644 --- a/vendor/github.com/cilium/hive/cell/lifecycle.go +++ b/vendor/github.com/cilium/hive/cell/lifecycle.go @@ -75,6 +75,19 @@ type augmentedHook struct { moduleID FullModuleID } +func NewDefaultLifecycle(hooks []HookInterface, numStarted int, logThreshold time.Duration) *DefaultLifecycle { + h := make([]augmentedHook, 0, len(hooks)) + for _, hook := range hooks { + h = append(h, augmentedHook{hook, nil}) + } + return &DefaultLifecycle{ + mu: sync.Mutex{}, + hooks: h, + numStarted: numStarted, + LogThreshold: logThreshold, + } +} + func (lc *DefaultLifecycle) Append(hook HookInterface) { lc.mu.Lock() defer lc.mu.Unlock() @@ -92,7 +105,7 @@ func (lc *DefaultLifecycle) Start(log *slog.Logger, ctx context.Context) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - for _, hook := range lc.hooks { + for i, hook := range lc.hooks { fnName, exists := getHookFuncName(hook, true) if !exists { @@ -102,6 +115,13 @@ func (lc *DefaultLifecycle) Start(log *slog.Logger, ctx context.Context) error { } l := log.With("function", fnName) + + // Do not attempt to start already started hooks. + if i < lc.numStarted { + l.Error("Hook appears to be running. 
Skipping") + continue + } + l.Debug("Executing start hook") t0 := time.Now() if err := hook.Start(ctx); err != nil { diff --git a/vendor/github.com/cilium/hive/cell/simple_health.go b/vendor/github.com/cilium/hive/cell/simple_health.go index 49806c1540..60da0ef46d 100644 --- a/vendor/github.com/cilium/hive/cell/simple_health.go +++ b/vendor/github.com/cilium/hive/cell/simple_health.go @@ -1,7 +1,15 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + package cell import ( + "fmt" + "regexp" "sync" + "time" + + "github.com/cilium/hive/script" ) type simpleHealthRoot struct { @@ -87,6 +95,57 @@ func NewSimpleHealth() (Health, *SimpleHealth) { return h, h } +// SimpleHealthCmd for showing or checking the simple module health state. +// Not provided as hive.ScriptCmdOut due to cyclic import issues. To include +// provide with: hive.ScriptCmdOut("health", SimpleHealthCmd(simpleHealth))) +// +// Example: +// +// # show health +// health +// +// # grep health +// health 'my-module: level=OK' +func SimpleHealthCmd(h *SimpleHealth) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "Show or grep simple health", + Args: "(pattern)", + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + var re *regexp.Regexp + if len(args) == 1 { + re = regexp.MustCompile(args[0]) + } + for s.Context().Err() == nil { + h.Lock() + for name, h := range h.all { + var errStr string + if h.Error != nil { + errStr = h.Error.Error() + } + line := fmt.Sprintf("%s: level=%s message=%s error=%s", name, h.Level, h.Status, errStr) + if re != nil { + if re.Match([]byte(line)) { + h.Unlock() + s.Logf("matched: %s\n", line) + return nil, nil + } + } else { + fmt.Fprintln(s.LogWriter(), line) + } + } + h.Unlock() + if re == nil { + return nil, nil + } + time.Sleep(10 * time.Millisecond) + } + return nil, fmt.Errorf("no match for %s", re) + }, + ) +} + var _ Health = &SimpleHealth{} var SimpleHealthCell = Provide(NewSimpleHealth) diff --git a/vendor/github.com/cilium/hive/hive.go b/vendor/github.com/cilium/hive/hive.go index 34e17b600b..4dd80d6afa 100644 --- a/vendor/github.com/cilium/hive/hive.go +++ b/vendor/github.com/cilium/hive/hive.go @@ -20,6 +20,7 @@ import ( "go.uber.org/dig" "github.com/cilium/hive/cell" + "github.com/cilium/hive/script" ) type Options struct { @@ -421,3 +422,19 @@ func (h *Hive) getEnvName(option string) string { upper := strings.ToUpper(under) return h.opts.EnvPrefix + upper } + +func (h *Hive) ScriptCommands(log *slog.Logger) (map[string]script.Cmd, error) { + if err := h.Populate(log); err != nil { + return nil, fmt.Errorf("failed to populate object graph: %s", err) + } + m := map[string]script.Cmd{} + m["hive"] = hiveScriptCmd(h, log) + + // Gather the commands from the hive. 
+ h.container.Invoke(func(sc ScriptCmds) { + for name, cmd := range sc.Map() { + m[name] = cmd + } + }) + return m, nil +} diff --git a/vendor/github.com/cilium/hive/script.go b/vendor/github.com/cilium/hive/script.go new file mode 100644 index 0000000000..72d1e782a5 --- /dev/null +++ b/vendor/github.com/cilium/hive/script.go @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package hive + +import ( + "context" + "errors" + "fmt" + "io" + "log/slog" + "os" + "os/signal" + "time" + + "github.com/cilium/hive/cell" + "github.com/cilium/hive/script" + "golang.org/x/term" +) + +func NewScriptCmd(name string, cmd script.Cmd) ScriptCmdOut { + return ScriptCmdOut{ScriptCmd: ScriptCmd{name, cmd}} +} + +func NewScriptCmds(cmds map[string]script.Cmd) (out ScriptCmdsOut) { + out.ScriptCmds = make([]ScriptCmd, 0, len(cmds)) + for name, cmd := range cmds { + out.ScriptCmds = append(out.ScriptCmds, ScriptCmd{name, cmd}) + } + return out +} + +type ScriptCmd struct { + Name string + Cmd script.Cmd +} + +type ScriptCmds struct { + cell.In + + ScriptCmds []ScriptCmd `group:"script-commands"` +} + +func (sc ScriptCmds) Map() map[string]script.Cmd { + m := make(map[string]script.Cmd, len(sc.ScriptCmds)) + for _, c := range sc.ScriptCmds { + m[c.Name] = c.Cmd + } + return m +} + +type ScriptCmdOut struct { + cell.Out + + ScriptCmd ScriptCmd `group:"script-commands"` +} + +type ScriptCmdsOut struct { + cell.Out + + ScriptCmds []ScriptCmd `group:"script-commands,flatten"` +} + +func hiveScriptCmd(h *Hive, log *slog.Logger) script.Cmd { + const defaultTimeout = time.Minute + return script.Command( + script.CmdUsage{ + Summary: "manipulate the hive", + Args: "cmd args...", + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + if len(args) < 1 { + return nil, fmt.Errorf("hive cmd args...\n'cmd' is one of: start, stop, jobs") + } + switch args[0] { + case "start": + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) + defer cancel() + return nil, h.Start(log, ctx) + case "stop": + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) + defer cancel() + return nil, h.Stop(log, ctx) + } + return nil, fmt.Errorf("unknown hive command %q, expected one of: start, stop, jobs", args[0]) + }, + ) +} + +func RunRepl(h *Hive, in *os.File, out *os.File, prompt string) { + // Try to set the input into raw mode. 
+ restore, err := script.MakeRaw(int(in.Fd())) + defer restore() + + inout := struct { + io.Reader + io.Writer + }{in, out} + term := term.NewTerminal(inout, prompt) + log := slog.New(slog.NewTextHandler(term, nil)) + + cmds, err := h.ScriptCommands(log) + if err != nil { + log.Error("ScriptCommands()", "error", err) + return + } + for name, cmd := range script.DefaultCmds() { + cmds[name] = cmd + } + + e := script.Engine{ + Cmds: cmds, + Conds: nil, + } + + stop := make(chan struct{}) + defer close(stop) + + sigs := make(chan os.Signal, 1) + defer signal.Stop(sigs) + signal.Notify(sigs, os.Interrupt) + + newState := func() *script.State { + ctx, cancel := context.WithCancel(context.Background()) + s, err := script.NewState(ctx, "/tmp", nil) + if err != nil { + panic(err) + } + go func() { + select { + case <-stop: + cancel() + case <-sigs: + cancel() + } + }() + return s + } + + s := newState() + + for { + line, err := term.ReadLine() + if err != nil { + if errors.Is(err, io.EOF) { + return + } else { + panic(err) + } + } + + err = e.ExecuteLine(s, line, term) + if err != nil { + fmt.Fprintln(term, err.Error()) + } + + if s.Context().Err() != nil { + // Context was cancelled due to interrupt. Re-create the state + // to run more commands. + s = newState() + fmt.Fprintln(term, "^C (interrupted)") + } + } +} diff --git a/vendor/github.com/cilium/hive/script/LICENSE b/vendor/github.com/cilium/hive/script/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/github.com/cilium/hive/script/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/cilium/hive/script/README.md b/vendor/github.com/cilium/hive/script/README.md new file mode 100644 index 0000000000..6b6da3a3fa --- /dev/null +++ b/vendor/github.com/cilium/hive/script/README.md @@ -0,0 +1,4 @@ +This is a fork of rsc.io/script (v0.0.2). It mostly adds support for interactive use to it. + +The makeraw* files are adapted from term_unix.go etc. files from x/term to enable interrupts. 
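
As a rough illustration (not part of the patch itself) of how the script-command plumbing above is expected to fit together: a cell contributes a command to the "script-commands" value group with NewScriptCmd, and RunRepl starts the interactive prompt. hive.New is assumed here to be the existing hive constructor; the other identifiers all appear in this diff, so treat this as a sketch rather than documented usage.

package main

import (
	"os"

	"github.com/cilium/hive"
	"github.com/cilium/hive/cell"
	"github.com/cilium/hive/script"
)

func main() {
	h := hive.New(
		cell.Module("example", "Example module",
			// Contribute an extra command to the "script-commands"
			// value group introduced in this change.
			cell.Provide(func() hive.ScriptCmdOut {
				return hive.NewScriptCmd("hello", script.Command(
					script.CmdUsage{Summary: "print a greeting"},
					func(s *script.State, args ...string) (script.WaitFunc, error) {
						s.Logf("hello from the hive\n")
						return nil, nil
					},
				))
			}),
		),
	)

	// Interactive prompt: 'hive start', 'hive stop' and the default
	// commands (cat, exec, grep, ...) become available, alongside the
	// contributed 'hello' command.
	hive.RunRepl(h, os.Stdin, os.Stdout, "hive> ")
}

At the prompt, 'hive start' and 'hive stop' are handled by hiveScriptCmd above, while 'hello' runs the command provided through the value group.
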
+ diff --git a/vendor/github.com/cilium/hive/script/README.md.original b/vendor/github.com/cilium/hive/script/README.md.original new file mode 100644 index 0000000000..0716f680e3 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/README.md.original @@ -0,0 +1,11 @@ +This is a copy of cmd/go/internal/script. + +See and . +Posting it here makes it available for others to try +without us committing to officially supporting it. +We have been using it in the go command for many years now; +the code is quite stable. +Ironically, it has very few tests. + + is a port +of an earlier version of the go command script language. diff --git a/vendor/github.com/cilium/hive/script/cmds.go b/vendor/github.com/cilium/hive/script/cmds.go new file mode 100644 index 0000000000..acea4128bf --- /dev/null +++ b/vendor/github.com/cilium/hive/script/cmds.go @@ -0,0 +1,1167 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package script + +import ( + "errors" + "fmt" + "io/fs" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/cilium/hive/script/internal/diff" + "golang.org/x/term" +) + +// DefaultCmds returns a set of broadly useful script commands. +// +// Run the 'help' command within a script engine to view a list of the available +// commands. +func DefaultCmds() map[string]Cmd { + return map[string]Cmd{ + "cat": Cat(), + "cd": Cd(), + "chmod": Chmod(), + "cmp": Cmp(), + "cmpenv": Cmpenv(), + "cp": Cp(), + "echo": Echo(), + "env": Env(), + "exec": Exec(func(cmd *exec.Cmd) error { return cmd.Process.Signal(os.Interrupt) }, 100*time.Millisecond), // arbitrary grace period + "exists": Exists(), + "grep": Grep(), + "help": Help(), + "mkdir": Mkdir(), + "mv": Mv(), + "rm": Rm(), + "replace": Replace(), + "sleep": Sleep(), + "stderr": Stderr(), + "stdout": Stdout(), + "stop": Stop(), + "symlink": Symlink(), + "wait": Wait(), + "break": Break(), + } +} + +// Command returns a new Cmd with a Usage method that returns a copy of the +// given CmdUsage and a Run method calls the given function. +func Command(usage CmdUsage, run func(*State, ...string) (WaitFunc, error)) Cmd { + return &funcCmd{ + usage: usage, + run: run, + } +} + +// A funcCmd implements Cmd using a function value. +type funcCmd struct { + usage CmdUsage + run func(*State, ...string) (WaitFunc, error) +} + +func (c *funcCmd) Run(s *State, args ...string) (WaitFunc, error) { + return c.run(s, args...) +} + +func (c *funcCmd) Usage() *CmdUsage { return &c.usage } + +// firstNonFlag returns a slice containing the index of the first argument in +// rawArgs that is not a flag, or nil if all arguments are flags. +func firstNonFlag(rawArgs ...string) []int { + for i, arg := range rawArgs { + if !strings.HasPrefix(arg, "-") { + return []int{i} + } + if arg == "--" { + return []int{i + 1} + } + } + return nil +} + +// Cat writes the concatenated contents of the named file(s) to the script's +// stdout buffer. 
+func Cat() Cmd { + return Command( + CmdUsage{ + Summary: "concatenate files and print to the script's stdout buffer", + Args: "files...", + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) == 0 { + return nil, ErrUsage + } + + paths := make([]string, 0, len(args)) + for _, arg := range args { + paths = append(paths, s.Path(arg)) + } + + var buf strings.Builder + errc := make(chan error, 1) + go func() { + for _, p := range paths { + b, err := os.ReadFile(p) + buf.Write(b) + if err != nil { + errc <- err + return + } + } + errc <- nil + }() + + wait := func(*State) (stdout, stderr string, err error) { + err = <-errc + return buf.String(), "", err + } + return wait, nil + }) +} + +// Cd changes the current working directory. +func Cd() Cmd { + return Command( + CmdUsage{ + Summary: "change the working directory", + Args: "dir", + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) != 1 { + return nil, ErrUsage + } + return nil, s.Chdir(args[0]) + }) +} + +// Chmod changes the permissions of a file or a directory.. +func Chmod() Cmd { + return Command( + CmdUsage{ + Summary: "change file mode bits", + Args: "perm paths...", + Detail: []string{ + "Changes the permissions of the named files or directories to be equal to perm.", + "Only numerical permissions are supported.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) < 2 { + return nil, ErrUsage + } + + perm, err := strconv.ParseUint(args[0], 0, 32) + if err != nil || perm&uint64(fs.ModePerm) != perm { + return nil, fmt.Errorf("invalid mode: %s", args[0]) + } + + for _, arg := range args[1:] { + err := os.Chmod(s.Path(arg), fs.FileMode(perm)) + if err != nil { + return nil, err + } + } + return nil, nil + }) +} + +// Cmp compares the contents of two files, or the contents of either the +// "stdout" or "stderr" buffer and a file, returning a non-nil error if the +// contents differ. +func Cmp() Cmd { + return Command( + CmdUsage{ + Args: "[-q] file1 file2", + Summary: "compare files for differences", + Detail: []string{ + "By convention, file1 is the actual data and file2 is the expected data.", + "The command succeeds if the file contents are identical.", + "File1 can be 'stdout' or 'stderr' to compare the stdout or stderr buffer from the most recent command.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + return nil, doCompare(s, false, args...) + }) +} + +// Cmpenv is like Compare, but also performs environment substitutions +// on the contents of both arguments. +func Cmpenv() Cmd { + return Command( + CmdUsage{ + Args: "[-q] file1 file2", + Summary: "compare files for differences, with environment expansion", + Detail: []string{ + "By convention, file1 is the actual data and file2 is the expected data.", + "The command succeeds if the file contents are identical after substituting variables from the script environment.", + "File1 can be 'stdout' or 'stderr' to compare the script's stdout or stderr buffer.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + return nil, doCompare(s, true, args...) 
+ }) +} + +func doCompare(s *State, env bool, args ...string) error { + quiet := false + if len(args) > 0 && args[0] == "-q" { + quiet = true + args = args[1:] + } + if len(args) != 2 { + return ErrUsage + } + + name1, name2 := args[0], args[1] + var text1, text2 string + switch name1 { + case "stdout": + text1 = s.Stdout() + case "stderr": + text1 = s.Stderr() + default: + data, err := os.ReadFile(s.Path(name1)) + if err != nil { + return err + } + text1 = string(data) + } + + data, err := os.ReadFile(s.Path(name2)) + if err != nil { + return err + } + text2 = string(data) + + if env { + text1 = s.ExpandEnv(text1, false) + text2 = s.ExpandEnv(text2, false) + } + + if text1 != text2 { + if !quiet { + diffText := diff.Diff(name1, []byte(text1), name2, []byte(text2)) + s.Logf("%s\n", diffText) + } + return fmt.Errorf("%s and %s differ", name1, name2) + } + return nil +} + +// Cp copies one or more files to a new location. +func Cp() Cmd { + return Command( + CmdUsage{ + Summary: "copy files to a target file or directory", + Args: "src... dst", + Detail: []string{ + "src can include 'stdout' or 'stderr' to copy from the script's stdout or stderr buffer.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) < 2 { + return nil, ErrUsage + } + + dst := s.Path(args[len(args)-1]) + info, err := os.Stat(dst) + dstDir := err == nil && info.IsDir() + if len(args) > 2 && !dstDir { + return nil, &fs.PathError{Op: "cp", Path: dst, Err: errors.New("destination is not a directory")} + } + + for _, arg := range args[:len(args)-1] { + var ( + src string + data []byte + mode fs.FileMode + ) + switch arg { + case "stdout": + src = arg + data = []byte(s.Stdout()) + mode = 0666 + case "stderr": + src = arg + data = []byte(s.Stderr()) + mode = 0666 + default: + src = s.Path(arg) + info, err := os.Stat(src) + if err != nil { + return nil, err + } + mode = info.Mode() & 0777 + data, err = os.ReadFile(src) + if err != nil { + return nil, err + } + } + targ := dst + if dstDir { + targ = filepath.Join(dst, filepath.Base(src)) + } + err := os.WriteFile(targ, data, mode) + if err != nil { + return nil, err + } + } + + return nil, nil + }) +} + +// Echo writes its arguments to stdout, followed by a newline. +func Echo() Cmd { + return Command( + CmdUsage{ + Summary: "display a line of text", + Args: "string...", + }, + func(s *State, args ...string) (WaitFunc, error) { + var buf strings.Builder + for i, arg := range args { + if i > 0 { + buf.WriteString(" ") + } + buf.WriteString(arg) + } + buf.WriteString("\n") + out := buf.String() + + // Stuff the result into a callback to satisfy the OutputCommandFunc + // interface, even though it isn't really asynchronous even if run in the + // background. + // + // Nobody should be running 'echo' as a background command, but it's not worth + // defining yet another interface, and also doesn't seem worth shoehorning + // into a SimpleCommand the way we did with Wait. + return func(*State) (stdout, stderr string, err error) { + return out, "", nil + }, nil + }) +} + +// Env sets or logs the values of environment variables. +// +// With no arguments, Env reports all variables in the environment. +// "key=value" arguments set variables, and arguments without "=" +// cause the corresponding value to be printed to the stdout buffer. 
+func Env() Cmd { + return Command( + CmdUsage{ + Summary: "set or log the values of environment variables", + Args: "[key[=value]...]", + Detail: []string{ + "With no arguments, print the script environment to the log.", + "Otherwise, add the listed key=value pairs to the environment or print the listed keys.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + out := new(strings.Builder) + if len(args) == 0 { + for _, kv := range s.env { + fmt.Fprintf(out, "%s\n", kv) + } + } else { + for _, env := range args { + i := strings.Index(env, "=") + if i < 0 { + // Display value instead of setting it. + fmt.Fprintf(out, "%s=%s\n", env, s.envMap[env]) + continue + } + if err := s.Setenv(env[:i], env[i+1:]); err != nil { + return nil, err + } + } + } + var wait WaitFunc + if out.Len() > 0 || len(args) == 0 { + wait = func(*State) (stdout, stderr string, err error) { + return out.String(), "", nil + } + } + return wait, nil + }) +} + +// Exec runs an arbitrary executable as a subprocess. +// +// When the Script's context is canceled, Exec sends the interrupt signal, then +// waits for up to the given delay for the subprocess to flush output before +// terminating it with os.Kill. +func Exec(cancel func(*exec.Cmd) error, waitDelay time.Duration) Cmd { + return Command( + CmdUsage{ + Summary: "run an executable program with arguments", + Args: "program [args...]", + Detail: []string{ + "Note that 'exec' does not terminate the script (unlike Unix shells).", + }, + Async: true, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) < 1 { + return nil, ErrUsage + } + + // Use the script's PATH to look up the command (if it does not contain a separator) + // instead of the test process's PATH (see lookPath). + // Don't use filepath.Clean, since that changes "./foo" to "foo". + name := filepath.FromSlash(args[0]) + path := name + if !strings.Contains(name, string(filepath.Separator)) { + var err error + path, err = lookPath(s, name) + if err != nil { + return nil, err + } + } + + return startCommand(s, name, path, args[1:], cancel, waitDelay) + }) +} + +func startCommand(s *State, name, path string, args []string, cancel func(*exec.Cmd) error, waitDelay time.Duration) (WaitFunc, error) { + var ( + cmd *exec.Cmd + stdoutBuf, stderrBuf strings.Builder + ) + for { + cmd = exec.CommandContext(s.Context(), path, args...) + if cancel == nil { + cmd.Cancel = nil + } else { + cmd.Cancel = func() error { return cancel(cmd) } + } + cmd.WaitDelay = waitDelay + cmd.Args[0] = name + cmd.Dir = s.Getwd() + cmd.Env = s.env + cmd.Stdout = &stdoutBuf + cmd.Stderr = &stderrBuf + err := cmd.Start() + if err == nil { + break + } + if isETXTBSY(err) { + // If the script (or its host process) just wrote the executable we're + // trying to run, a fork+exec in another thread may be holding open the FD + // that we used to write the executable (see https://go.dev/issue/22315). + // Since the descriptor should have CLOEXEC set, the problem should + // resolve as soon as the forked child reaches its exec call. + // Keep retrying until that happens. + } else { + return nil, err + } + } + + wait := func(s *State) (stdout, stderr string, err error) { + err = cmd.Wait() + return stdoutBuf.String(), stderrBuf.String(), err + } + return wait, nil +} + +// lookPath is (roughly) like exec.LookPath, but it uses the script's current +// PATH to find the executable. 
+func lookPath(s *State, command string) (string, error) { + var strEqual func(string, string) bool + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Using GOOS as a proxy for case-insensitive file system. + // TODO(bcmills): Remove this assumption. + strEqual = strings.EqualFold + } else { + strEqual = func(a, b string) bool { return a == b } + } + + var pathExt []string + var searchExt bool + var isExecutable func(os.FileInfo) bool + if runtime.GOOS == "windows" { + // Use the test process's PathExt instead of the script's. + // If PathExt is set in the command's environment, cmd.Start fails with + // "parameter is invalid". Not sure why. + // If the command already has an extension in PathExt (like "cmd.exe") + // don't search for other extensions (not "cmd.bat.exe"). + pathExt = strings.Split(os.Getenv("PathExt"), string(filepath.ListSeparator)) + searchExt = true + cmdExt := filepath.Ext(command) + for _, ext := range pathExt { + if strEqual(cmdExt, ext) { + searchExt = false + break + } + } + isExecutable = func(fi os.FileInfo) bool { + return fi.Mode().IsRegular() + } + } else { + isExecutable = func(fi os.FileInfo) bool { + return fi.Mode().IsRegular() && fi.Mode().Perm()&0111 != 0 + } + } + + pathEnv, _ := s.LookupEnv(pathEnvName()) + for _, dir := range strings.Split(pathEnv, string(filepath.ListSeparator)) { + if dir == "" { + continue + } + + // Determine whether dir needs a trailing path separator. + // Note: we avoid filepath.Join in this function because it cleans the + // result: we want to preserve the exact dir prefix from the environment. + sep := string(filepath.Separator) + if os.IsPathSeparator(dir[len(dir)-1]) { + sep = "" + } + + if searchExt { + ents, err := os.ReadDir(dir) + if err != nil { + continue + } + for _, ent := range ents { + for _, ext := range pathExt { + if !ent.IsDir() && strEqual(ent.Name(), command+ext) { + return dir + sep + ent.Name(), nil + } + } + } + } else { + path := dir + sep + command + if fi, err := os.Stat(path); err == nil && isExecutable(fi) { + return path, nil + } + } + } + return "", &exec.Error{Name: command, Err: exec.ErrNotFound} +} + +// pathEnvName returns the platform-specific variable used by os/exec.LookPath +// to look up executable names (either "PATH" or "path"). +// +// TODO(bcmills): Investigate whether we can instead use PATH uniformly and +// rewrite it to $path when executing subprocesses. +func pathEnvName() string { + switch runtime.GOOS { + case "plan9": + return "path" + default: + return "PATH" + } +} + +// Exists checks that the named file(s) exist. +func Exists() Cmd { + return Command( + CmdUsage{ + Summary: "check that files exist", + Args: "[-readonly] [-exec] file...", + }, + func(s *State, args ...string) (WaitFunc, error) { + var readonly, exec bool + loop: + for len(args) > 0 { + switch args[0] { + case "-readonly": + readonly = true + args = args[1:] + case "-exec": + exec = true + args = args[1:] + default: + break loop + } + } + if len(args) == 0 { + return nil, ErrUsage + } + + for _, file := range args { + file = s.Path(file) + info, err := os.Stat(file) + if err != nil { + return nil, err + } + if readonly && info.Mode()&0222 != 0 { + return nil, fmt.Errorf("%s exists but is writable", file) + } + if exec && runtime.GOOS != "windows" && info.Mode()&0111 == 0 { + return nil, fmt.Errorf("%s exists but is not executable", file) + } + } + + return nil, nil + }) +} + +// Grep checks that file content matches a regexp. 
+// Like stdout/stderr and unlike Unix grep, it accepts Go regexp syntax. +// +// Grep does not modify the State's stdout or stderr buffers. +// (Its output goes to the script log, not stdout.) +func Grep() Cmd { + return Command( + CmdUsage{ + Summary: "find lines in a file that match a pattern", + Args: matchUsage + " file", + Detail: []string{ + "The command succeeds if at least one match (or the exact count, if given) is found.", + "The -q flag suppresses printing of matches.", + }, + RegexpArgs: firstNonFlag, + }, + func(s *State, args ...string) (WaitFunc, error) { + return nil, match(s, args, "", "grep") + }) +} + +const matchUsage = "[-count=N] [-q] 'pattern'" + +// match implements the Grep, Stdout, and Stderr commands. +func match(s *State, args []string, text, name string) error { + n := 0 + if len(args) >= 1 && strings.HasPrefix(args[0], "-count=") { + var err error + n, err = strconv.Atoi(args[0][len("-count="):]) + if err != nil { + return fmt.Errorf("bad -count=: %v", err) + } + if n < 1 { + return fmt.Errorf("bad -count=: must be at least 1") + } + args = args[1:] + } + quiet := false + if len(args) >= 1 && args[0] == "-q" { + quiet = true + args = args[1:] + } + + isGrep := name == "grep" + + wantArgs := 1 + if len(args) != wantArgs { + return ErrUsage + } + + pattern := `(?m)` + args[0] + re, err := regexp.Compile(pattern) + if err != nil { + return err + } + + if isGrep { + if len(args) == 1 || args[1] == "-" { + text = s.stdout + } else { + name = args[1] // for error messages + data, err := os.ReadFile(s.Path(args[1])) + if err != nil { + return err + } + text = string(data) + } + } + + if n > 0 { + count := len(re.FindAllString(text, -1)) + if count != n { + return fmt.Errorf("found %d matches for %#q in %s", count, pattern, name) + } + return nil + } + + if !re.MatchString(text) { + return fmt.Errorf("no match for %#q in %s", pattern, name) + } + + if !quiet { + // Print the lines containing the match. + loc := re.FindStringIndex(text) + for loc[0] > 0 && text[loc[0]-1] != '\n' { + loc[0]-- + } + for loc[1] < len(text) && text[loc[1]] != '\n' { + loc[1]++ + } + lines := strings.TrimSuffix(text[loc[0]:loc[1]], "\n") + s.Logf("matched: %s\n", lines) + } + return nil +} + +// Help writes command documentation to the script log. +func Help() Cmd { + return Command( + CmdUsage{ + Summary: "log help text for commands and conditions", + Args: "[-v] name...", + Detail: []string{ + "To display help for a specific condition, enclose it in brackets: 'help [amd64]'.", + "To display complete documentation when listing all commands, pass the -v flag.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if s.engine == nil { + return nil, errors.New("no engine configured") + } + + verbose := false + if len(args) > 0 { + verbose = true + if args[0] == "-v" { + args = args[1:] + } + } + + var cmds, conds []string + for _, arg := range args { + if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") { + conds = append(conds, arg[1:len(arg)-1]) + } else { + cmds = append(cmds, arg) + } + } + + out := new(strings.Builder) + + if len(conds) > 0 || (len(args) == 0 && len(s.engine.Conds) > 0) { + if conds == nil { + out.WriteString("conditions:\n\n") + } + s.engine.ListConds(out, s, conds...) + } + + if len(cmds) > 0 || len(args) == 0 { + if len(args) == 0 { + out.WriteString("\ncommands:\n\n") + } + s.engine.ListCmds(out, verbose, cmds...) 
+ } + + wait := func(*State) (stdout, stderr string, err error) { + return out.String(), "", nil + } + return wait, nil + }) +} + +// Mkdir creates a directory and any needed parent directories. +func Mkdir() Cmd { + return Command( + CmdUsage{ + Summary: "create directories, if they do not already exist", + Args: "path...", + Detail: []string{ + "Unlike Unix mkdir, parent directories are always created if needed.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) < 1 { + return nil, ErrUsage + } + for _, arg := range args { + if err := os.MkdirAll(s.Path(arg), 0777); err != nil { + return nil, err + } + } + return nil, nil + }) +} + +// Mv renames an existing file or directory to a new path. +func Mv() Cmd { + return Command( + CmdUsage{ + Summary: "rename a file or directory to a new path", + Args: "old new", + Detail: []string{ + "OS-specific restrictions may apply when old and new are in different directories.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) != 2 { + return nil, ErrUsage + } + return nil, os.Rename(s.Path(args[0]), s.Path(args[1])) + }) +} + +// Program returns a new command that runs the named program, found from the +// host process's PATH (not looked up in the script's PATH). +func Program(name string, cancel func(*exec.Cmd) error, waitDelay time.Duration) Cmd { + var ( + shortName string + summary string + lookPathOnce sync.Once + path string + pathErr error + ) + if filepath.IsAbs(name) { + lookPathOnce.Do(func() { path = filepath.Clean(name) }) + shortName = strings.TrimSuffix(filepath.Base(path), ".exe") + summary = "run the '" + shortName + "' program provided by the script host" + } else { + shortName = name + summary = "run the '" + shortName + "' program from the script host's PATH" + } + + return Command( + CmdUsage{ + Summary: summary, + Args: "[args...]", + Async: true, + }, + func(s *State, args ...string) (WaitFunc, error) { + lookPathOnce.Do(func() { + path, pathErr = exec.LookPath(name) + }) + if pathErr != nil { + return nil, pathErr + } + return startCommand(s, shortName, path, args, cancel, waitDelay) + }) +} + +// Replace replaces all occurrences of a string in a file with another string. +func Replace() Cmd { + return Command( + CmdUsage{ + Summary: "replace strings in a file", + Args: "[old new]... file", + Detail: []string{ + "The 'old' and 'new' arguments are unquoted as if in quoted Go strings.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args)%2 != 1 { + return nil, ErrUsage + } + + oldNew := make([]string, 0, len(args)-1) + for _, arg := range args[:len(args)-1] { + s, err := strconv.Unquote(`"` + arg + `"`) + if err != nil { + return nil, err + } + oldNew = append(oldNew, s) + } + + r := strings.NewReplacer(oldNew...) + file := s.Path(args[len(args)-1]) + + data, err := os.ReadFile(file) + if err != nil { + return nil, err + } + replaced := r.Replace(string(data)) + + return nil, os.WriteFile(file, []byte(replaced), 0666) + }) +} + +// Rm removes a file or directory. +// +// If a directory, Rm also recursively removes that directory's +// contents. 
+func Rm() Cmd { + return Command( + CmdUsage{ + Summary: "remove a file or directory", + Args: "path...", + Detail: []string{ + "If the path is a directory, its contents are removed recursively.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) < 1 { + return nil, ErrUsage + } + for _, arg := range args { + if err := removeAll(s.Path(arg)); err != nil { + return nil, err + } + } + return nil, nil + }) +} + +// removeAll removes dir and all files and directories it contains. +// +// Unlike os.RemoveAll, removeAll attempts to make the directories writable if +// needed in order to remove their contents. +func removeAll(dir string) error { + // module cache has 0444 directories; + // make them writable in order to remove content. + filepath.WalkDir(dir, func(path string, info fs.DirEntry, err error) error { + // chmod not only directories, but also things that we couldn't even stat + // due to permission errors: they may also be unreadable directories. + if err != nil || info.IsDir() { + os.Chmod(path, 0777) + } + return nil + }) + return os.RemoveAll(dir) +} + +// Sleep sleeps for the given Go duration or until the script's context is +// cancelled, whichever happens first. +func Sleep() Cmd { + return Command( + CmdUsage{ + Summary: "sleep for a specified duration", + Args: "duration", + Detail: []string{ + "The duration must be given as a Go time.Duration string.", + }, + Async: true, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) != 1 { + return nil, ErrUsage + } + + d, err := time.ParseDuration(args[0]) + if err != nil { + return nil, err + } + + timer := time.NewTimer(d) + wait := func(s *State) (stdout, stderr string, err error) { + ctx := s.Context() + select { + case <-ctx.Done(): + timer.Stop() + return "", "", ctx.Err() + case <-timer.C: + return "", "", nil + } + } + return wait, nil + }) +} + +// Stderr searches for a regular expression in the stderr buffer. +func Stderr() Cmd { + return Command( + CmdUsage{ + Summary: "find lines in the stderr buffer that match a pattern", + Args: matchUsage + " file", + Detail: []string{ + "The command succeeds if at least one match (or the exact count, if given) is found.", + "The -q flag suppresses printing of matches.", + }, + RegexpArgs: firstNonFlag, + }, + func(s *State, args ...string) (WaitFunc, error) { + return nil, match(s, args, s.Stderr(), "stderr") + }) +} + +// Stdout searches for a regular expression in the stdout buffer. +func Stdout() Cmd { + return Command( + CmdUsage{ + Summary: "find lines in the stdout buffer that match a pattern", + Args: matchUsage + " file", + Detail: []string{ + "The command succeeds if at least one match (or the exact count, if given) is found.", + "The -q flag suppresses printing of matches.", + }, + RegexpArgs: firstNonFlag, + }, + func(s *State, args ...string) (WaitFunc, error) { + return nil, match(s, args, s.Stdout(), "stdout") + }) +} + +// Stop returns a sentinel error that causes script execution to halt +// and s.Execute to return with a nil error. +func Stop() Cmd { + return Command( + CmdUsage{ + Summary: "stop execution of the script", + Args: "[msg]", + Detail: []string{ + "The message is written to the script log, but no error is reported from the script engine.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) > 1 { + return nil, ErrUsage + } + // TODO(bcmills): The argument passed to stop seems redundant with comments. + // Either use it systematically or remove it. 
+ if len(args) == 1 { + return nil, stopError{msg: args[0]} + } + return nil, stopError{} + }) +} + +// stopError is the sentinel error type returned by the Stop command. +type stopError struct { + msg string +} + +func (s stopError) Error() string { + if s.msg == "" { + return "stop" + } + return "stop: " + s.msg +} + +// Symlink creates a symbolic link. +func Symlink() Cmd { + return Command( + CmdUsage{ + Summary: "create a symlink", + Args: "path -> target", + Detail: []string{ + "Creates path as a symlink to target.", + "The '->' token (like in 'ls -l' output on Unix) is required.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) != 3 || args[1] != "->" { + return nil, ErrUsage + } + + // Note that the link target args[2] is not interpreted with s.Path: + // it will be interpreted relative to the directory file is in. + return nil, os.Symlink(filepath.FromSlash(args[2]), s.Path(args[0])) + }) +} + +// Wait waits for the completion of background commands. +// +// When Wait returns, the stdout and stderr buffers contain the concatenation of +// the background commands' respective outputs in the order in which those +// commands were started. +func Wait() Cmd { + return Command( + CmdUsage{ + Summary: "wait for completion of background commands", + Args: "", + Detail: []string{ + "Waits for all background commands to complete.", + "The output (and any error) from each command is printed to the log in the order in which the commands were started.", + "After the call to 'wait', the script's stdout and stderr buffers contain the concatenation of the background commands' outputs.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) > 0 { + return nil, ErrUsage + } + + var stdouts, stderrs []string + var errs []*CommandError + for _, bg := range s.background { + stdout, stderr, err := bg.wait(s) + + beforeArgs := "" + if len(bg.args) > 0 { + beforeArgs = " " + } + s.Logf("[background] %s%s%s\n", bg.name, beforeArgs, quoteArgs(bg.args)) + + if stdout != "" { + s.Logf("[stdout]\n%s", stdout) + stdouts = append(stdouts, stdout) + } + if stderr != "" { + s.Logf("[stderr]\n%s", stderr) + stderrs = append(stderrs, stderr) + } + if err != nil { + s.Logf("[%v]\n", err) + } + if cmdErr := checkStatus(bg.command, err); cmdErr != nil { + errs = append(errs, cmdErr.(*CommandError)) + } + } + + s.stdout = strings.Join(stdouts, "") + s.stderr = strings.Join(stderrs, "") + s.background = nil + if len(errs) > 0 { + return nil, waitError{errs: errs} + } + return nil, nil + }) +} + +// A waitError wraps one or more errors returned by background commands. 
+type waitError struct { + errs []*CommandError +} + +func (w waitError) Error() string { + b := new(strings.Builder) + for i, err := range w.errs { + if i != 0 { + b.WriteString("\n") + } + b.WriteString(err.Error()) + } + return b.String() +} + +func (w waitError) Unwrap() error { + if len(w.errs) == 1 { + return w.errs[0] + } + return nil +} + +func Break() Cmd { + return Command( + CmdUsage{ + Summary: "break into interactive prompt", + }, + func(s *State, args ...string) (WaitFunc, error) { + tty, err := os.OpenFile("/dev/tty", os.O_RDWR, 0) + if err != nil { + return nil, fmt.Errorf("open /dev/tty: %w", err) + } + defer tty.Close() + + prev, err := term.MakeRaw(int(tty.Fd())) + if err != nil { + return nil, fmt.Errorf("cannot set /dev/tty to raw mode") + } + defer term.Restore(int(tty.Fd()), prev) + + // Flush any pending logs + engine := s.engine + + term := term.NewTerminal(tty, "debug> ") + s.FlushLog() + fmt.Fprintf(term, "\nBreak! Control-d to continue.\n") + + for { + line, err := term.ReadLine() + if err != nil { + return nil, nil + } + err = engine.ExecuteLine(s, line, term) + if err != nil { + fmt.Fprintln(term, err.Error()) + } + } + }, + ) +} diff --git a/vendor/github.com/cilium/hive/script/cmds_other.go b/vendor/github.com/cilium/hive/script/cmds_other.go new file mode 100644 index 0000000000..847b225ae6 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/cmds_other.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(unix || windows) + +package script + +func isETXTBSY(err error) bool { + return false +} diff --git a/vendor/github.com/cilium/hive/script/cmds_posix.go b/vendor/github.com/cilium/hive/script/cmds_posix.go new file mode 100644 index 0000000000..2525f6e752 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/cmds_posix.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || windows + +package script + +import ( + "errors" + "syscall" +) + +func isETXTBSY(err error) bool { + return errors.Is(err, syscall.ETXTBSY) +} diff --git a/vendor/github.com/cilium/hive/script/conds.go b/vendor/github.com/cilium/hive/script/conds.go new file mode 100644 index 0000000000..ffe5e3f0db --- /dev/null +++ b/vendor/github.com/cilium/hive/script/conds.go @@ -0,0 +1,198 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package script + +import ( + "fmt" + "os" + "runtime" + "sync" +) + +// DefaultConds returns a set of broadly useful script conditions. +// +// Run the 'help' command within a script engine to view a list of the available +// conditions. 
+func DefaultConds() map[string]Cond { + conds := make(map[string]Cond) + + conds["GOOS"] = PrefixCondition( + "runtime.GOOS == ", + func(_ *State, suffix string) (bool, error) { + if suffix == runtime.GOOS { + return true, nil + } + return false, nil + }) + + conds["GOARCH"] = PrefixCondition( + "runtime.GOARCH == ", + func(_ *State, suffix string) (bool, error) { + if suffix == runtime.GOARCH { + return true, nil + } + return false, nil + }) + + conds["compiler"] = PrefixCondition( + "runtime.Compiler == ", + func(_ *State, suffix string) (bool, error) { + if suffix == runtime.Compiler { + return true, nil + } + switch suffix { + case "gc", "gccgo": + return false, nil + default: + return false, fmt.Errorf("unrecognized compiler %q", suffix) + } + }) + + conds["root"] = BoolCondition("os.Geteuid() == 0", os.Geteuid() == 0) + + return conds +} + +// Condition returns a Cond with the given summary and evaluation function. +func Condition(summary string, eval func(*State) (bool, error)) Cond { + return &funcCond{eval: eval, usage: CondUsage{Summary: summary}} +} + +type funcCond struct { + eval func(*State) (bool, error) + usage CondUsage +} + +func (c *funcCond) Usage() *CondUsage { return &c.usage } + +func (c *funcCond) Eval(s *State, suffix string) (bool, error) { + if suffix != "" { + return false, ErrUsage + } + return c.eval(s) +} + +// PrefixCondition returns a Cond with the given summary and evaluation function. +func PrefixCondition(summary string, eval func(*State, string) (bool, error)) Cond { + return &prefixCond{eval: eval, usage: CondUsage{Summary: summary, Prefix: true}} +} + +type prefixCond struct { + eval func(*State, string) (bool, error) + usage CondUsage +} + +func (c *prefixCond) Usage() *CondUsage { return &c.usage } + +func (c *prefixCond) Eval(s *State, suffix string) (bool, error) { + return c.eval(s, suffix) +} + +// BoolCondition returns a Cond with the given truth value and summary. +// The Cond rejects the use of condition suffixes. +func BoolCondition(summary string, v bool) Cond { + return &boolCond{v: v, usage: CondUsage{Summary: summary}} +} + +type boolCond struct { + v bool + usage CondUsage +} + +func (b *boolCond) Usage() *CondUsage { return &b.usage } + +func (b *boolCond) Eval(s *State, suffix string) (bool, error) { + if suffix != "" { + return false, ErrUsage + } + return b.v, nil +} + +// OnceCondition returns a Cond that calls eval the first time the condition is +// evaluated. Future calls reuse the same result. +// +// The eval function is not passed a *State because the condition is cached +// across all execution states and must not vary by state. +func OnceCondition(summary string, eval func() (bool, error)) Cond { + return &onceCond{eval: eval, usage: CondUsage{Summary: summary}} +} + +type onceCond struct { + once sync.Once + v bool + err error + eval func() (bool, error) + usage CondUsage +} + +func (l *onceCond) Usage() *CondUsage { return &l.usage } + +func (l *onceCond) Eval(s *State, suffix string) (bool, error) { + if suffix != "" { + return false, ErrUsage + } + l.once.Do(func() { l.v, l.err = l.eval() }) + return l.v, l.err +} + +// CachedCondition is like Condition but only calls eval the first time the +// condition is evaluated for a given suffix. +// Future calls with the same suffix reuse the earlier result. +// +// The eval function is not passed a *State because the condition is cached +// across all execution states and must not vary by state. 
+func CachedCondition(summary string, eval func(string) (bool, error)) Cond { + return &cachedCond{eval: eval, usage: CondUsage{Summary: summary, Prefix: true}} +} + +type cachedCond struct { + m sync.Map + eval func(string) (bool, error) + usage CondUsage +} + +func (c *cachedCond) Usage() *CondUsage { return &c.usage } + +func (c *cachedCond) Eval(_ *State, suffix string) (bool, error) { + for { + var ready chan struct{} + + v, loaded := c.m.Load(suffix) + if !loaded { + ready = make(chan struct{}) + v, loaded = c.m.LoadOrStore(suffix, (<-chan struct{})(ready)) + + if !loaded { + inPanic := true + defer func() { + if inPanic { + c.m.Delete(suffix) + } + close(ready) + }() + + b, err := c.eval(suffix) + inPanic = false + + if err == nil { + c.m.Store(suffix, b) + return b, nil + } else { + c.m.Store(suffix, err) + return false, err + } + } + } + + switch v := v.(type) { + case bool: + return v, nil + case error: + return false, v + case <-chan struct{}: + <-v + } + } +} diff --git a/vendor/github.com/cilium/hive/script/engine.go b/vendor/github.com/cilium/hive/script/engine.go new file mode 100644 index 0000000000..7b9b9bf3f8 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/engine.go @@ -0,0 +1,853 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package script implements a small, customizable, platform-agnostic scripting +// language. +// +// Scripts are run by an [Engine] configured with a set of available commands +// and conditions that guard those commands. Each script has an associated +// working directory and environment, along with a buffer containing the stdout +// and stderr output of a prior command, tracked in a [State] that commands can +// inspect and modify. +// +// The default commands configured by [NewEngine] resemble a simplified Unix +// shell. +// +// # Script Language +// +// Each line of a script is parsed into a sequence of space-separated command +// words, with environment variable expansion within each word and # marking an +// end-of-line comment. Additional variables named ':' and '/' are expanded +// within script arguments (expanding to the value of os.PathListSeparator and +// os.PathSeparator respectively) but are not inherited in subprocess +// environments. +// +// Adding single quotes around text keeps spaces in that text from being treated +// as word separators and also disables environment variable expansion. +// Inside a single-quoted block of text, a repeated single quote indicates +// a literal single quote, as in: +// +// 'Don''t communicate by sharing memory.' +// +// A line beginning with # is a comment and conventionally explains what is +// being done or tested at the start of a new section of the script. +// +// Commands are executed one at a time, and errors are checked for each command; +// if any command fails unexpectedly, no subsequent commands in the script are +// executed. The command prefix ! indicates that the command on the rest of the +// line (typically go or a matching predicate) must fail instead of succeeding. +// The command prefix ? indicates that the command may or may not succeed, but +// the script should continue regardless. +// +// The command prefix [cond] indicates that the command on the rest of the line +// should only run when the condition is satisfied. +// +// A condition can be negated: [!root] means to run the rest of the line only if +// the user is not root. 
Multiple conditions may be given for a single command, +// for example, '[linux] [amd64] skip'. The command will run if all conditions +// are satisfied. +// +// Package script is particularly good for writing tests. +// Ironically, it has no tests. +package script + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "sort" + "strings" + "time" +) + +// An Engine stores the configuration for executing a set of scripts. +// +// The same Engine may execute multiple scripts concurrently. +type Engine struct { + Cmds map[string]Cmd + Conds map[string]Cond + + // If Quiet is true, Execute deletes log prints from the previous + // section when starting a new section. + Quiet bool +} + +// NewEngine returns an Engine configured with a basic set of commands and conditions. +func NewEngine() *Engine { + return &Engine{ + Cmds: DefaultCmds(), + Conds: DefaultConds(), + } +} + +// A Cmd is a command that is available to a script. +type Cmd interface { + // Run begins running the command. + // + // If the command produces output or can be run in the background, run returns + // a WaitFunc that will be called to obtain the result of the command and + // update the engine's stdout and stderr buffers. + // + // Run itself and the returned WaitFunc may inspect and/or modify the State, + // but the State's methods must not be called concurrently after Run has + // returned. + // + // Run may retain and access the args slice until the WaitFunc has returned. + Run(s *State, args ...string) (WaitFunc, error) + + // Usage returns the usage for the command, which the caller must not modify. + Usage() *CmdUsage +} + +// A WaitFunc is a function called to retrieve the results of a Cmd. +type WaitFunc func(*State) (stdout, stderr string, err error) + +// A CmdUsage describes the usage of a Cmd, independent of its name +// (which can change based on its registration). +type CmdUsage struct { + Summary string // in the style of the Name section of a Unix 'man' page, omitting the name + Args string // a brief synopsis of the command's arguments (only) + Detail []string // zero or more sentences in the style of the Description section of a Unix 'man' page + + // If Async is true, the Cmd is meaningful to run in the background, and its + // Run method must return either a non-nil WaitFunc or a non-nil error. + Async bool + + // RegexpArgs reports which arguments, if any, should be treated as regular + // expressions. It takes as input the raw, unexpanded arguments and returns + // the list of argument indices that will be interpreted as regular + // expressions. + // + // If RegexpArgs is nil, all arguments are assumed not to be regular + // expressions. + RegexpArgs func(rawArgs ...string) []int +} + +// A Cond is a condition deciding whether a command should be run. +type Cond interface { + // Eval reports whether the condition applies to the given State. + // + // If the condition's usage reports that it is a prefix, + // the condition must be used with a suffix. + // Otherwise, the passed-in suffix argument is always the empty string. + Eval(s *State, suffix string) (bool, error) + + // Usage returns the usage for the condition, which the caller must not modify. + Usage() *CondUsage +} + +// A CondUsage describes the usage of a Cond, independent of its name +// (which can change based on its registration). 
+type CondUsage struct { + Summary string // a single-line summary of when the condition is true + + // If Prefix is true, the condition is a prefix and requires a + // colon-separated suffix (like "[GOOS:linux]" for the "GOOS" condition). + // The suffix may be the empty string (like "[prefix:]"). + Prefix bool +} + +// Execute reads and executes script, writing the output to log. +// +// Execute stops and returns an error at the first command that does not succeed. +// The returned error's text begins with "file:line: ". +// +// If the script runs to completion or ends by a 'stop' command, +// Execute returns nil. +// +// Execute does not stop background commands started by the script +// before returning. To stop those, use [State.CloseAndWait] or the +// [Wait] command. +func (e *Engine) Execute(s *State, file string, script *bufio.Reader, log io.Writer) (err error) { + defer func(prev *Engine) { s.engine = prev }(s.engine) + s.engine = e + defer func(prev io.Writer) { s.logOut = prev }(s.logOut) + s.logOut = log + + var sectionStart time.Time + // endSection flushes the logs for the current section from s.log to log. + // ok indicates whether all commands in the section succeeded. + endSection := func(ok bool) error { + var err error + if sectionStart.IsZero() { + // We didn't write a section header or record a timestamp, so just dump the + // whole log without those. + if s.log.Len() > 0 { + err = s.FlushLog() + } + } else if s.log.Len() == 0 { + // Adding elapsed time for doing nothing is meaningless, so don't. + _, err = io.WriteString(log, "\n") + } else { + // Insert elapsed time for section at the end of the section's comment. + _, err = fmt.Fprintf(log, " (%.3fs)\n", time.Since(sectionStart).Seconds()) + + if err == nil && (!ok || !e.Quiet) { + err = s.FlushLog() + } else { + s.log.Reset() + } + } + + sectionStart = time.Time{} + return err + } + + var lineno int + lineErr := func(err error) error { + if errors.As(err, new(*CommandError)) { + return err + } + return fmt.Errorf("%s:%d: %w", file, lineno, err) + } + + // In case of failure or panic, flush any pending logs for the section. + defer func() { + if sErr := endSection(false); sErr != nil && err == nil { + err = lineErr(sErr) + } + }() + + for { + if err := s.ctx.Err(); err != nil { + // This error wasn't produced by any particular command, + // so don't wrap it in a CommandError. + return lineErr(err) + } + + line, err := script.ReadString('\n') + if err == io.EOF { + if line == "" { + break // Reached the end of the script. + } + // If the script doesn't end in a newline, interpret the final line. + } else if err != nil { + return lineErr(err) + } + line = strings.TrimSuffix(line, "\n") + lineno++ + + // The comment character "#" at the start of the line delimits a section of + // the script. + if strings.HasPrefix(line, "#") { + // If there was a previous section, the fact that we are starting a new + // one implies the success of the previous one. + // + // At the start of the script, the state may also contain accumulated logs + // from commands executed on the State outside of the engine in order to + // set it up; flush those logs too. + if err := endSection(true); err != nil { + return lineErr(err) + } + + // Log the section start without a newline so that we can add + // a timestamp for the section when it ends. 
+ _, err = fmt.Fprintf(log, "%s", line) + sectionStart = time.Now() + if err != nil { + return lineErr(err) + } + continue + } + + cmd, err := parse(file, lineno, line) + if cmd == nil && err == nil { + continue // Ignore blank lines. + } + s.Logf("> %s\n", line) + if err != nil { + return lineErr(err) + } + + // Evaluate condition guards. + ok, err := e.conditionsActive(s, cmd.conds) + if err != nil { + return lineErr(err) + } + if !ok { + s.Logf("[condition not met]\n") + continue + } + + impl := e.Cmds[cmd.name] + + // Expand variables in arguments. + var regexpArgs []int + if impl != nil { + usage := impl.Usage() + if usage.RegexpArgs != nil { + // First join rawArgs without expansion to pass to RegexpArgs. + rawArgs := make([]string, 0, len(cmd.rawArgs)) + for _, frags := range cmd.rawArgs { + var b strings.Builder + for _, frag := range frags { + b.WriteString(frag.s) + } + rawArgs = append(rawArgs, b.String()) + } + regexpArgs = usage.RegexpArgs(rawArgs...) + } + } + cmd.args = expandArgs(s, cmd.rawArgs, regexpArgs) + + // Run the command. + err = e.runCommand(s, cmd, impl) + if err != nil { + if stop := (stopError{}); errors.As(err, &stop) { + // Since the 'stop' command halts execution of the entire script, + // log its message separately from the section in which it appears. + err = endSection(true) + s.Logf("%v\n", stop) + if err == nil { + return nil + } + } + return lineErr(err) + } + } + + if err := endSection(true); err != nil { + return lineErr(err) + } + return nil +} + +func (e *Engine) ExecuteLine(s *State, line string, log io.Writer) (err error) { + defer func(prev *Engine) { s.engine = prev }(s.engine) + s.engine = e + defer func(prev io.Writer) { s.logOut = prev }(s.logOut) + s.logOut = log + defer s.FlushLog() + + cmd, err := parse("", 0, line) + if cmd == nil && err == nil { + return nil + } + if err != nil { + return err + } + + // Evaluate condition guards. + ok, err := e.conditionsActive(s, cmd.conds) + if err != nil { + return err + } + if !ok { + s.Logf("[condition not met]\n") + return + } + + impl := e.Cmds[cmd.name] + + // Expand variables in arguments. + var regexpArgs []int + if impl != nil { + usage := impl.Usage() + if usage.RegexpArgs != nil { + // First join rawArgs without expansion to pass to RegexpArgs. + rawArgs := make([]string, 0, len(cmd.rawArgs)) + for _, frags := range cmd.rawArgs { + var b strings.Builder + for _, frag := range frags { + b.WriteString(frag.s) + } + rawArgs = append(rawArgs, b.String()) + } + regexpArgs = usage.RegexpArgs(rawArgs...) + } + } + cmd.args = expandArgs(s, cmd.rawArgs, regexpArgs) + + // Run the command. + err = e.runCommand(s, cmd, impl) + if err != nil { + if stop := (stopError{}); errors.As(err, &stop) { + // Since the 'stop' command halts execution of the entire script, + // log its message separately from the section in which it appears. + s.Logf("%v\n", stop) + if err == nil { + return nil + } + } + return err + } + return nil +} + +// A command is a complete command parsed from a script. +type command struct { + file string + line int + want expectedStatus + conds []condition // all must be satisfied + name string // the name of the command; must be non-empty + rawArgs [][]argFragment + args []string // shell-expanded arguments following name + background bool // command should run in background (ends with a trailing &) +} + +// An expectedStatus describes the expected outcome of a command. +// Script execution halts when a command does not match its expected status. 
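// Editor's illustration (not part of the vendored file): given the script line
//
//	! [linux] grep 'foo bar' out.txt
//
// parse (below) produces roughly the following command value; args is only
// filled in later by expandArgs, after the [linux] guard has been checked:
//
//	command{
//		want:  failure, // "!": the command is expected to fail
//		conds: []condition{{want: true, tag: "linux"}},
//		name:  "grep",
//		// rawArgs holds the unexpanded fragments "foo bar" (quoted) and
//		// "out.txt"; expansion yields args = []string{"foo bar", "out.txt"}.
//	}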
+type expectedStatus string + +const ( + success expectedStatus = "" + failure expectedStatus = "!" + successOrFailure expectedStatus = "?" +) + +type argFragment struct { + s string + quoted bool // if true, disable variable expansion for this fragment +} + +type condition struct { + want bool + tag string +} + +const argSepChars = " \t\r\n#" + +// parse parses a single line as a list of space-separated arguments. +// subject to environment variable expansion (but not resplitting). +// Single quotes around text disable splitting and expansion. +// To embed a single quote, double it: +// +// 'Don''t communicate by sharing memory.' +func parse(filename string, lineno int, line string) (cmd *command, err error) { + cmd = &command{file: filename, line: lineno} + var ( + rawArg []argFragment // text fragments of current arg so far (need to add line[start:i]) + start = -1 // if >= 0, position where current arg text chunk starts + quoted = false // currently processing quoted text + ) + + flushArg := func() error { + if len(rawArg) == 0 { + return nil // Nothing to flush. + } + defer func() { rawArg = nil }() + + if cmd.name == "" && len(rawArg) == 1 && !rawArg[0].quoted { + arg := rawArg[0].s + + // Command prefix ! means negate the expectations about this command: + // go command should fail, match should not be found, etc. + // Prefix ? means allow either success or failure. + switch want := expectedStatus(arg); want { + case failure, successOrFailure: + if cmd.want != "" { + return errors.New("duplicated '!' or '?' token") + } + cmd.want = want + return nil + } + + // Command prefix [cond] means only run this command if cond is satisfied. + if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") { + want := true + arg = strings.TrimSpace(arg[1 : len(arg)-1]) + if strings.HasPrefix(arg, "!") { + want = false + arg = strings.TrimSpace(arg[1:]) + } + if arg == "" { + return errors.New("empty condition") + } + cmd.conds = append(cmd.conds, condition{want: want, tag: arg}) + return nil + } + + if arg == "" { + return errors.New("empty command") + } + cmd.name = arg + return nil + } + + cmd.rawArgs = append(cmd.rawArgs, rawArg) + return nil + } + + for i := 0; ; i++ { + if !quoted && (i >= len(line) || strings.ContainsRune(argSepChars, rune(line[i]))) { + // Found arg-separating space. + if start >= 0 { + rawArg = append(rawArg, argFragment{s: line[start:i], quoted: false}) + start = -1 + } + if err := flushArg(); err != nil { + return nil, err + } + if i >= len(line) || line[i] == '#' { + break + } + continue + } + if i >= len(line) { + return nil, errors.New("unterminated quoted argument") + } + if line[i] == '\'' { + if !quoted { + // starting a quoted chunk + if start >= 0 { + rawArg = append(rawArg, argFragment{s: line[start:i], quoted: false}) + } + start = i + 1 + quoted = true + continue + } + // 'foo''bar' means foo'bar, like in rc shell and Pascal. + if i+1 < len(line) && line[i+1] == '\'' { + rawArg = append(rawArg, argFragment{s: line[start:i], quoted: true}) + start = i + 1 + i++ // skip over second ' before next iteration + continue + } + // ending a quoted chunk + rawArg = append(rawArg, argFragment{s: line[start:i], quoted: true}) + start = i + 1 + quoted = false + continue + } + // found character worth saving; make sure we're saving + if start < 0 { + start = i + } + } + + if cmd.name == "" { + if cmd.want != "" || len(cmd.conds) > 0 || len(cmd.rawArgs) > 0 || cmd.background { + // The line contains a command prefix or suffix, but no actual command. 
+ return nil, errors.New("missing command") + } + + // The line is blank, or contains only a comment. + return nil, nil + } + + if n := len(cmd.rawArgs); n > 0 { + last := cmd.rawArgs[n-1] + if len(last) == 1 && !last[0].quoted && last[0].s == "&" { + cmd.background = true + cmd.rawArgs = cmd.rawArgs[:n-1] + } + } + return cmd, nil +} + +// expandArgs expands the shell variables in rawArgs and joins them to form the +// final arguments to pass to a command. +func expandArgs(s *State, rawArgs [][]argFragment, regexpArgs []int) []string { + args := make([]string, 0, len(rawArgs)) + for i, frags := range rawArgs { + isRegexp := false + for _, j := range regexpArgs { + if i == j { + isRegexp = true + break + } + } + + var b strings.Builder + for _, frag := range frags { + if frag.quoted { + b.WriteString(frag.s) + } else { + b.WriteString(s.ExpandEnv(frag.s, isRegexp)) + } + } + args = append(args, b.String()) + } + return args +} + +// quoteArgs returns a string that parse would parse as args when passed to a command. +// +// TODO(bcmills): This function should have a fuzz test. +func quoteArgs(args []string) string { + var b strings.Builder + for i, arg := range args { + if i > 0 { + b.WriteString(" ") + } + if strings.ContainsAny(arg, "'"+argSepChars) { + // Quote the argument to a form that would be parsed as a single argument. + b.WriteString("'") + b.WriteString(strings.ReplaceAll(arg, "'", "''")) + b.WriteString("'") + } else { + b.WriteString(arg) + } + } + return b.String() +} + +func (e *Engine) conditionsActive(s *State, conds []condition) (bool, error) { + for _, cond := range conds { + var impl Cond + prefix, suffix, ok := strings.Cut(cond.tag, ":") + if ok { + impl = e.Conds[prefix] + if impl == nil { + return false, fmt.Errorf("unknown condition prefix %q", prefix) + } + if !impl.Usage().Prefix { + return false, fmt.Errorf("condition %q cannot be used with a suffix", prefix) + } + } else { + impl = e.Conds[cond.tag] + if impl == nil { + return false, fmt.Errorf("unknown condition %q", cond.tag) + } + if impl.Usage().Prefix { + return false, fmt.Errorf("condition %q requires a suffix", cond.tag) + } + } + active, err := impl.Eval(s, suffix) + + if err != nil { + return false, fmt.Errorf("evaluating condition %q: %w", cond.tag, err) + } + if active != cond.want { + return false, nil + } + } + + return true, nil +} + +func (e *Engine) runCommand(s *State, cmd *command, impl Cmd) error { + if impl == nil { + return cmdError(cmd, errors.New("unknown command")) + } + + async := impl.Usage().Async + if cmd.background && !async { + return cmdError(cmd, errors.New("command cannot be run in background")) + } + + wait, runErr := impl.Run(s, cmd.args...) + if wait == nil { + if async && runErr == nil { + return cmdError(cmd, errors.New("internal error: async command returned a nil WaitFunc")) + } + return checkStatus(cmd, runErr) + } + if runErr != nil { + return cmdError(cmd, errors.New("internal error: command returned both an error and a WaitFunc")) + } + + if cmd.background { + s.background = append(s.background, backgroundCmd{ + command: cmd, + wait: wait, + }) + // Clear stdout and stderr, since they no longer correspond to the last + // command executed. 
+ s.stdout = "" + s.stderr = "" + return nil + } + + stdout, stderr, waitErr := wait(s) + s.stdout = stdout + s.stderr = stderr + if stdout != "" { + s.Logf("[stdout]\n%s", stdout) + } + if stderr != "" { + s.Logf("[stderr]\n%s", stderr) + } + if cmdErr := checkStatus(cmd, waitErr); cmdErr != nil { + return cmdErr + } + if waitErr != nil { + // waitErr was expected (by cmd.want), so log it instead of returning it. + s.Logf("[%v]\n", waitErr) + } + return nil +} + +func checkStatus(cmd *command, err error) error { + if err == nil { + if cmd.want == failure { + return cmdError(cmd, ErrUnexpectedSuccess) + } + return nil + } + + if s := (stopError{}); errors.As(err, &s) { + // This error originated in the Stop command. + // Propagate it as-is. + return cmdError(cmd, err) + } + + if w := (waitError{}); errors.As(err, &w) { + // This error was surfaced from a background process by a call to Wait. + // Add a call frame for Wait itself, but ignore its "want" field. + // (Wait itself cannot fail to wait on commands or else it would leak + // processes and/or goroutines — so a negative assertion for it would be at + // best ambiguous.) + return cmdError(cmd, err) + } + + if cmd.want == success { + return cmdError(cmd, err) + } + + if cmd.want == failure && (errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled)) { + // The command was terminated because the script is no longer interested in + // its output, so we don't know what it would have done had it run to + // completion — for all we know, it could have exited without error if it + // ran just a smidge faster. + return cmdError(cmd, err) + } + + return nil +} + +// ListCmds prints to w a list of the named commands, +// annotating each with its arguments and a short usage summary. +// If verbose is true, ListCmds prints full details for each command. +// +// Each of the name arguments should be a command name. +// If no names are passed as arguments, ListCmds lists all the +// commands registered in e. +func (e *Engine) ListCmds(w io.Writer, verbose bool, names ...string) error { + if names == nil { + names = make([]string, 0, len(e.Cmds)) + for name := range e.Cmds { + names = append(names, name) + } + sort.Strings(names) + } + + for _, name := range names { + cmd := e.Cmds[name] + usage := cmd.Usage() + + suffix := "" + if usage.Async { + suffix = " [&]" + } + + _, err := fmt.Fprintf(w, "%s %s%s\n\t%s\n", name, usage.Args, suffix, usage.Summary) + if err != nil { + return err + } + + if verbose { + if _, err := io.WriteString(w, "\n"); err != nil { + return err + } + for _, line := range usage.Detail { + if err := wrapLine(w, line, 60, "\t"); err != nil { + return err + } + } + if _, err := io.WriteString(w, "\n"); err != nil { + return err + } + } + } + + return nil +} + +func wrapLine(w io.Writer, line string, cols int, indent string) error { + line = strings.TrimLeft(line, " ") + for len(line) > cols { + bestSpace := -1 + for i, r := range line { + if r == ' ' { + if i <= cols || bestSpace < 0 { + bestSpace = i + } + if i > cols { + break + } + } + } + if bestSpace < 0 { + break + } + + if _, err := fmt.Fprintf(w, "%s%s\n", indent, line[:bestSpace]); err != nil { + return err + } + line = line[bestSpace+1:] + } + + _, err := fmt.Fprintf(w, "%s%s\n", indent, line) + return err +} + +// ListConds prints to w a list of conditions, one per line, +// annotating each with a description and whether the condition +// is true in the state s (if s is non-nil). 
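// (Editor's note, illustrative only: checkStatus above maps a command's
// expected status onto its actual result roughly as follows:
//
//	want ""  (success): any error fails the script
//	want "!" (failure): success yields ErrUnexpectedSuccess; other errors are
//	                    logged and accepted, except context cancellation
//	want "?" (either):  both success and failure are accepted
//
// Errors originating from the stop command or from Wait are always
// propagated as-is.)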
+// +// Each of the tag arguments should be a condition string of +// the form "name" or "name:suffix". If no tags are passed as +// arguments, ListConds lists all conditions registered in +// the engine e. +func (e *Engine) ListConds(w io.Writer, s *State, tags ...string) error { + if tags == nil { + tags = make([]string, 0, len(e.Conds)) + for name := range e.Conds { + tags = append(tags, name) + } + sort.Strings(tags) + } + + for _, tag := range tags { + if prefix, suffix, ok := strings.Cut(tag, ":"); ok { + cond := e.Conds[prefix] + if cond == nil { + return fmt.Errorf("unknown condition prefix %q", prefix) + } + usage := cond.Usage() + if !usage.Prefix { + return fmt.Errorf("condition %q cannot be used with a suffix", prefix) + } + + activeStr := "" + if s != nil { + if active, _ := cond.Eval(s, suffix); active { + activeStr = " (active)" + } + } + _, err := fmt.Fprintf(w, "[%s]%s\n\t%s\n", tag, activeStr, usage.Summary) + if err != nil { + return err + } + continue + } + + cond := e.Conds[tag] + if cond == nil { + return fmt.Errorf("unknown condition %q", tag) + } + var err error + usage := cond.Usage() + if usage.Prefix { + _, err = fmt.Fprintf(w, "[%s:*]\n\t%s\n", tag, usage.Summary) + } else { + activeStr := "" + if s != nil { + if ok, _ := cond.Eval(s, ""); ok { + activeStr = " (active)" + } + } + _, err = fmt.Fprintf(w, "[%s]%s\n\t%s\n", tag, activeStr, usage.Summary) + } + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/cilium/hive/script/errors.go b/vendor/github.com/cilium/hive/script/errors.go new file mode 100644 index 0000000000..7f43e72888 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/errors.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package script + +import ( + "errors" + "fmt" +) + +// ErrUnexpectedSuccess indicates that a script command that was expected to +// fail (as indicated by a "!" prefix) instead completed successfully. +var ErrUnexpectedSuccess = errors.New("unexpected success") + +// A CommandError describes an error resulting from attempting to execute a +// specific command. +type CommandError struct { + File string + Line int + Op string + Args []string + Err error +} + +func cmdError(cmd *command, err error) *CommandError { + return &CommandError{ + File: cmd.file, + Line: cmd.line, + Op: cmd.name, + Args: cmd.args, + Err: err, + } +} + +func (e *CommandError) Error() string { + if len(e.Args) == 0 { + return fmt.Sprintf("%s:%d: %s: %v", e.File, e.Line, e.Op, e.Err) + } + return fmt.Sprintf("%s:%d: %s %s: %v", e.File, e.Line, e.Op, quoteArgs(e.Args), e.Err) +} + +func (e *CommandError) Unwrap() error { return e.Err } + +// A UsageError reports the valid arguments for a command. +// +// It may be returned in response to invalid arguments. +type UsageError struct { + Name string + Command Cmd +} + +func (e *UsageError) Error() string { + usage := e.Command.Usage() + suffix := "" + if usage.Async { + suffix = " [&]" + } + return fmt.Sprintf("usage: %s %s%s", e.Name, usage.Args, suffix) +} + +// ErrUsage may be returned by a Command to indicate that it was called with +// invalid arguments; its Usage method may be called to obtain details. 
+var ErrUsage = errors.New("invalid usage") diff --git a/vendor/github.com/cilium/hive/script/internal/diff/diff.go b/vendor/github.com/cilium/hive/script/internal/diff/diff.go new file mode 100644 index 0000000000..0aeeb75eb0 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/internal/diff/diff.go @@ -0,0 +1,261 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "bytes" + "fmt" + "sort" + "strings" +) + +// A pair is a pair of values tracked for both the x and y side of a diff. +// It is typically a pair of line indexes. +type pair struct{ x, y int } + +// Diff returns an anchored diff of the two texts old and new +// in the “unified diff” format. If old and new are identical, +// Diff returns a nil slice (no output). +// +// Unix diff implementations typically look for a diff with +// the smallest number of lines inserted and removed, +// which can in the worst case take time quadratic in the +// number of lines in the texts. As a result, many implementations +// either can be made to run for a long time or cut off the search +// after a predetermined amount of work. +// +// In contrast, this implementation looks for a diff with the +// smallest number of “unique” lines inserted and removed, +// where unique means a line that appears just once in both old and new. +// We call this an “anchored diff” because the unique lines anchor +// the chosen matching regions. An anchored diff is usually clearer +// than a standard diff, because the algorithm does not try to +// reuse unrelated blank lines or closing braces. +// The algorithm also guarantees to run in O(n log n) time +// instead of the standard O(n²) time. +// +// Some systems call this approach a “patience diff,” named for +// the “patience sorting” algorithm, itself named for a solitaire card game. +// We avoid that name for two reasons. First, the name has been used +// for a few different variants of the algorithm, so it is imprecise. +// Second, the name is frequently interpreted as meaning that you have +// to wait longer (to be patient) for the diff, meaning that it is a slower algorithm, +// when in fact the algorithm is faster than the standard one. +func Diff(oldName string, old []byte, newName string, new []byte) []byte { + if bytes.Equal(old, new) { + return nil + } + x := lines(old) + y := lines(new) + + // Print diff header. + var out bytes.Buffer + fmt.Fprintf(&out, "diff %s %s\n", oldName, newName) + fmt.Fprintf(&out, "--- %s\n", oldName) + fmt.Fprintf(&out, "+++ %s\n", newName) + + // Loop over matches to consider, + // expanding each match to include surrounding lines, + // and then printing diff chunks. + // To avoid setup/teardown cases outside the loop, + // tgs returns a leading {0,0} and trailing {len(x), len(y)} pair + // in the sequence of matches. + var ( + done pair // printed up to x[:done.x] and y[:done.y] + chunk pair // start lines of current chunk + count pair // number of lines from each side in current chunk + ctext []string // lines for current chunk + ) + for _, m := range tgs(x, y) { + if m.x < done.x { + // Already handled scanning forward from earlier match. + continue + } + + // Expand matching lines as far possible, + // establishing that x[start.x:end.x] == y[start.y:end.y]. + // Note that on the first (or last) iteration we may (or definitely do) + // have an empty match: start.x==end.x and start.y==end.y. 
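// (Editor's aside, illustrative only: a typical call to this function is
//
//	patch := Diff("a/config.yaml", oldBytes, "b/config.yaml", newBytes)
//	if patch != nil {
//		os.Stdout.Write(patch) // unified-style output with @@ hunk headers
//	}
//
// where a nil result means the two inputs were byte-identical.)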
+ start := m + for start.x > done.x && start.y > done.y && x[start.x-1] == y[start.y-1] { + start.x-- + start.y-- + } + end := m + for end.x < len(x) && end.y < len(y) && x[end.x] == y[end.y] { + end.x++ + end.y++ + } + + // Emit the mismatched lines before start into this chunk. + // (No effect on first sentinel iteration, when start = {0,0}.) + for _, s := range x[done.x:start.x] { + ctext = append(ctext, "-"+s) + count.x++ + } + for _, s := range y[done.y:start.y] { + ctext = append(ctext, "+"+s) + count.y++ + } + + // If we're not at EOF and have too few common lines, + // the chunk includes all the common lines and continues. + const C = 3 // number of context lines + if (end.x < len(x) || end.y < len(y)) && + (end.x-start.x < C || (len(ctext) > 0 && end.x-start.x < 2*C)) { + for _, s := range x[start.x:end.x] { + ctext = append(ctext, " "+s) + count.x++ + count.y++ + } + done = end + continue + } + + // End chunk with common lines for context. + if len(ctext) > 0 { + n := end.x - start.x + if n > C { + n = C + } + for _, s := range x[start.x : start.x+n] { + ctext = append(ctext, " "+s) + count.x++ + count.y++ + } + done = pair{start.x + n, start.y + n} + + // Format and emit chunk. + // Convert line numbers to 1-indexed. + // Special case: empty file shows up as 0,0 not 1,0. + if count.x > 0 { + chunk.x++ + } + if count.y > 0 { + chunk.y++ + } + fmt.Fprintf(&out, "@@ -%d,%d +%d,%d @@\n", chunk.x, count.x, chunk.y, count.y) + for _, s := range ctext { + out.WriteString(s) + } + count.x = 0 + count.y = 0 + ctext = ctext[:0] + } + + // If we reached EOF, we're done. + if end.x >= len(x) && end.y >= len(y) { + break + } + + // Otherwise start a new chunk. + chunk = pair{end.x - C, end.y - C} + for _, s := range x[chunk.x:end.x] { + ctext = append(ctext, " "+s) + count.x++ + count.y++ + } + done = end + } + + return out.Bytes() +} + +// lines returns the lines in the file x, including newlines. +// If the file does not end in a newline, one is supplied +// along with a warning about the missing newline. +func lines(x []byte) []string { + l := strings.SplitAfter(string(x), "\n") + if l[len(l)-1] == "" { + l = l[:len(l)-1] + } else { + // Treat last line as having a message about the missing newline attached, + // using the same text as BSD/GNU diff (including the leading backslash). + l[len(l)-1] += "\n\\ No newline at end of file\n" + } + return l +} + +// tgs returns the pairs of indexes of the longest common subsequence +// of unique lines in x and y, where a unique line is one that appears +// once in x and once in y. +// +// The longest common subsequence algorithm is as described in +// Thomas G. Szymanski, “A Special Case of the Maximal Common +// Subsequence Problem,” Princeton TR #170 (January 1975), +// available at https://research.swtch.com/tgs170.pdf. +func tgs(x, y []string) []pair { + // Count the number of times each string appears in a and b. + // We only care about 0, 1, many, counted as 0, -1, -2 + // for the x side and 0, -4, -8 for the y side. + // Using negative numbers now lets us distinguish positive line numbers later. + m := make(map[string]int) + for _, s := range x { + if c := m[s]; c > -2 { + m[s] = c - 1 + } + } + for _, s := range y { + if c := m[s]; c > -8 { + m[s] = c - 4 + } + } + + // Now unique strings can be identified by m[s] = -1+-4. + // + // Gather the indexes of those strings in x and y, building: + // xi[i] = increasing indexes of unique strings in x. + // yi[i] = increasing indexes of unique strings in y. 
+ // inv[i] = index j such that x[xi[i]] = y[yi[j]]. + var xi, yi, inv []int + for i, s := range y { + if m[s] == -1+-4 { + m[s] = len(yi) + yi = append(yi, i) + } + } + for i, s := range x { + if j, ok := m[s]; ok && j >= 0 { + xi = append(xi, i) + inv = append(inv, j) + } + } + + // Apply Algorithm A from Szymanski's paper. + // In those terms, A = J = inv and B = [0, n). + // We add sentinel pairs {0,0}, and {len(x),len(y)} + // to the returned sequence, to help the processing loop. + J := inv + n := len(xi) + T := make([]int, n) + L := make([]int, n) + for i := range T { + T[i] = n + 1 + } + for i := 0; i < n; i++ { + k := sort.Search(n, func(k int) bool { + return T[k] >= J[i] + }) + T[k] = J[i] + L[i] = k + 1 + } + k := 0 + for _, v := range L { + if k < v { + k = v + } + } + seq := make([]pair, 2+k) + seq[1+k] = pair{len(x), len(y)} // sentinel at end + lastj := n + for i := n - 1; i >= 0; i-- { + if L[i] == k && J[i] < lastj { + seq[k] = pair{xi[i], yi[J[i]]} + k-- + } + } + seq[0] = pair{0, 0} // sentinel at start + return seq +} diff --git a/vendor/github.com/cilium/hive/script/makeraw_unix.go b/vendor/github.com/cilium/hive/script/makeraw_unix.go new file mode 100644 index 0000000000..3fffbc74dc --- /dev/null +++ b/vendor/github.com/cilium/hive/script/makeraw_unix.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || netbsd || openbsd || aix || linux || solaris || zos + +package script + +import ( + "golang.org/x/sys/unix" +) + +// MakeRaw sets the terminal to raw mode, but with interrupt signals enabled. +func MakeRaw(fd int) (restore func(), err error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + oldState := *termios + + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.IEXTEN + termios.Lflag |= unix.ISIG // Enable interrupt signals + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, termios); err != nil { + return nil, err + } + + return func() { + unix.IoctlSetTermios(fd, ioctlWriteTermios, &oldState) + }, nil +} diff --git a/vendor/github.com/cilium/hive/script/makeraw_unix_bsd.go b/vendor/github.com/cilium/hive/script/makeraw_unix_bsd.go new file mode 100644 index 0000000000..064368d40a --- /dev/null +++ b/vendor/github.com/cilium/hive/script/makeraw_unix_bsd.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || netbsd || openbsd + +package script + +import ( + "golang.org/x/sys/unix" +) + +const ( + ioctlReadTermios = unix.TIOCGETA + ioctlWriteTermios = unix.TIOCSETA +) diff --git a/vendor/github.com/cilium/hive/script/makeraw_unix_other.go b/vendor/github.com/cilium/hive/script/makeraw_unix_other.go new file mode 100644 index 0000000000..84449a5f56 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/makeraw_unix_other.go @@ -0,0 +1,14 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || linux || solaris || zos + +package script + +import "golang.org/x/sys/unix" + +const ( + ioctlReadTermios = unix.TCGETS + ioctlWriteTermios = unix.TCSETS +) diff --git a/vendor/github.com/cilium/hive/script/makeraw_unsupported.go b/vendor/github.com/cilium/hive/script/makeraw_unsupported.go new file mode 100644 index 0000000000..fe88ed87b2 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/makeraw_unsupported.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !darwin && !linux + +package script + +import ( + "fmt" + "runtime" +) + +func MakeRaw(fd int) (restore func(), err error) { + return func() {}, fmt.Errorf("MakeRaw: not supported on %s", runtime.GOOS) +} diff --git a/vendor/github.com/cilium/hive/script/state.go b/vendor/github.com/cilium/hive/script/state.go new file mode 100644 index 0000000000..1226dcf60e --- /dev/null +++ b/vendor/github.com/cilium/hive/script/state.go @@ -0,0 +1,244 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package script + +import ( + "bytes" + "context" + "fmt" + "io" + "io/fs" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + + "golang.org/x/tools/txtar" +) + +// A State encapsulates the current state of a running script engine, +// including the script environment and any running background commands. +type State struct { + engine *Engine // the engine currently executing the script, if any + + ctx context.Context + cancel context.CancelFunc + file string + log bytes.Buffer + logOut io.Writer + + workdir string // initial working directory + pwd string // current working directory during execution + env []string // environment list (for os/exec) + envMap map[string]string // environment mapping (matches env) + stdout string // standard output from last 'go' command; for 'stdout' command + stderr string // standard error from last 'go' command; for 'stderr' command + + background []backgroundCmd +} + +type backgroundCmd struct { + *command + wait WaitFunc +} + +// NewState returns a new State permanently associated with ctx, with its +// initial working directory in workdir and its initial environment set to +// initialEnv (or os.Environ(), if initialEnv is nil). +// +// The new State also contains pseudo-environment-variables for +// ${/} and ${:} (for the platform's path and list separators respectively), +// but does not pass those to subprocesses. +func NewState(ctx context.Context, workdir string, initialEnv []string) (*State, error) { + absWork, err := filepath.Abs(workdir) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithCancel(ctx) + + // Make a fresh copy of the env slice to avoid aliasing bugs if we ever + // start modifying it in place; this also establishes the invariant that + // s.env contains no duplicates. + env := cleanEnv(initialEnv, absWork) + + envMap := make(map[string]string, len(env)) + + // Add entries for ${:} and ${/} to make it easier to write platform-independent + // paths in scripts. 
+ envMap["/"] = string(os.PathSeparator) + envMap[":"] = string(os.PathListSeparator) + + for _, kv := range env { + if k, v, ok := strings.Cut(kv, "="); ok { + envMap[k] = v + } + } + + s := &State{ + ctx: ctx, + cancel: cancel, + workdir: absWork, + pwd: absWork, + env: env, + envMap: envMap, + } + s.Setenv("PWD", absWork) + return s, nil +} + +// CloseAndWait cancels the State's Context and waits for any background commands to +// finish. If any remaining background command ended in an unexpected state, +// Close returns a non-nil error. +func (s *State) CloseAndWait(log io.Writer) error { + s.cancel() + wait, err := Wait().Run(s) + if wait != nil { + panic("script: internal error: Wait unexpectedly returns its own WaitFunc") + } + defer func(prev io.Writer) { s.logOut = prev }(s.logOut) + s.logOut = log + if flushErr := s.FlushLog(); err == nil { + err = flushErr + } + return err +} + +// Chdir changes the State's working directory to the given path. +func (s *State) Chdir(path string) error { + dir := s.Path(path) + if _, err := os.Stat(dir); err != nil { + return &fs.PathError{Op: "Chdir", Path: dir, Err: err} + } + s.pwd = dir + s.Setenv("PWD", dir) + return nil +} + +// Context returns the Context with which the State was created. +func (s *State) Context() context.Context { + return s.ctx +} + +// Environ returns a copy of the current script environment, +// in the form "key=value". +func (s *State) Environ() []string { + return append([]string(nil), s.env...) +} + +// ExpandEnv replaces ${var} or $var in the string according to the values of +// the environment variables in s. References to undefined variables are +// replaced by the empty string. +func (s *State) ExpandEnv(str string, inRegexp bool) string { + return os.Expand(str, func(key string) string { + e := s.envMap[key] + if inRegexp { + // Quote to literal strings: we want paths like C:\work\go1.4 to remain + // paths rather than regular expressions. + e = regexp.QuoteMeta(e) + } + return e + }) +} + +// ExtractFiles extracts the files in ar to the state's current directory, +// expanding any environment variables within each name. +// +// The files must reside within the working directory with which the State was +// originally created. +func (s *State) ExtractFiles(ar *txtar.Archive) error { + wd := s.workdir + + // Add trailing separator to terminate wd. + // This prevents extracting to outside paths which prefix wd, + // e.g. extracting to /home/foobar when wd is /home/foo + if wd == "" { + panic("s.workdir is unexpectedly empty") + } + if !os.IsPathSeparator(wd[len(wd)-1]) { + wd += string(filepath.Separator) + } + + for _, f := range ar.Files { + name := s.Path(s.ExpandEnv(f.Name, false)) + + if !strings.HasPrefix(name, wd) { + return fmt.Errorf("file %#q is outside working directory", f.Name) + } + + if err := os.MkdirAll(filepath.Dir(name), 0777); err != nil { + return err + } + if err := os.WriteFile(name, f.Data, 0666); err != nil { + return err + } + } + + return nil +} + +// Getwd returns the directory in which to run the next script command. +func (s *State) Getwd() string { return s.pwd } + +// Logf writes output to the script's log without updating its stdout or stderr +// buffers. (The output log functions as a kind of meta-stderr.) +func (s *State) Logf(format string, args ...any) { + fmt.Fprintf(&s.log, format, args...) +} + +func (s *State) LogWriter() io.Writer { + return &s.log +} + +// FlushLog writes out the contents of the script's log and clears the buffer. 
+func (s *State) FlushLog() error { + _, err := s.logOut.Write(s.log.Bytes()) + s.log.Reset() + return err +} + +// LookupEnv retrieves the value of the environment variable in s named by the key. +func (s *State) LookupEnv(key string) (string, bool) { + v, ok := s.envMap[key] + return v, ok +} + +// Path returns the absolute path in the host operating system for a +// script-based (generally slash-separated and relative) path. +func (s *State) Path(path string) string { + if filepath.IsAbs(path) { + return filepath.Clean(path) + } + return filepath.Join(s.pwd, path) +} + +// Setenv sets the value of the environment variable in s named by the key. +func (s *State) Setenv(key, value string) error { + s.env = cleanEnv(append(s.env, key+"="+value), s.pwd) + s.envMap[key] = value + return nil +} + +// Stdout returns the stdout output of the last command run, +// or the empty string if no command has been run. +func (s *State) Stdout() string { return s.stdout } + +// Stderr returns the stderr output of the last command run, +// or the empty string if no command has been run. +func (s *State) Stderr() string { return s.stderr } + +// cleanEnv returns a copy of env with any duplicates removed in favor of +// later values and any required system variables defined. +// +// If env is nil, cleanEnv copies the environment from os.Environ(). +func cleanEnv(env []string, pwd string) []string { + // There are some funky edge-cases in this logic, especially on Windows (with + // case-insensitive environment variables and variables with keys like "=C:"). + // Rather than duplicating exec.dedupEnv here, cheat and use exec.Cmd directly. + cmd := &exec.Cmd{Env: env} + cmd.Dir = pwd + return cmd.Environ() +} diff --git a/vendor/github.com/cilium/statedb/any_table.go b/vendor/github.com/cilium/statedb/any_table.go new file mode 100644 index 0000000000..ebea155734 --- /dev/null +++ b/vendor/github.com/cilium/statedb/any_table.go @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package statedb + +import ( + "fmt" + "iter" +) + +// AnyTable allows any-typed access to a StateDB table. This is intended +// for building generic tooling for accessing the table and should be +// avoided if possible. 
+type AnyTable struct { + Meta TableMeta +} + +func (t AnyTable) All(txn ReadTxn) iter.Seq2[any, Revision] { + all, _ := t.AllWatch(txn) + return all +} + +func (t AnyTable) AllWatch(txn ReadTxn) (iter.Seq2[any, Revision], <-chan struct{}) { + indexTxn := txn.getTxn().mustIndexReadTxn(t.Meta, PrimaryIndexPos) + return partSeq[any](indexTxn.Iterator()), indexTxn.RootWatch() +} + +func (t AnyTable) UnmarshalYAML(data []byte) (any, error) { + return t.Meta.unmarshalYAML(data) +} + +func (t AnyTable) Insert(txn WriteTxn, obj any) (old any, hadOld bool, err error) { + var iobj object + iobj, hadOld, err = txn.getTxn().insert(t.Meta, Revision(0), obj) + if hadOld { + old = iobj.data + } + return +} + +func (t AnyTable) Delete(txn WriteTxn, obj any) (old any, hadOld bool, err error) { + var iobj object + iobj, hadOld, err = txn.getTxn().delete(t.Meta, Revision(0), obj) + if hadOld { + old = iobj.data + } + return +} + +func (t AnyTable) Get(txn ReadTxn, index string, key string) (any, Revision, bool, error) { + itxn, rawKey, err := t.queryIndex(txn, index, key) + if err != nil { + return nil, 0, false, err + } + if itxn.unique { + obj, _, ok := itxn.Get(rawKey) + return obj.data, obj.revision, ok, nil + } + // For non-unique indexes we need to prefix search and make sure to fully + // match the secondary key. + iter, _ := itxn.Prefix(rawKey) + for { + k, obj, ok := iter.Next() + if !ok { + break + } + secondary, _ := decodeNonUniqueKey(k) + if len(secondary) == len(rawKey) { + return obj.data, obj.revision, true, nil + } + } + return nil, 0, false, nil +} + +func (t AnyTable) Prefix(txn ReadTxn, index string, key string) (iter.Seq2[any, Revision], error) { + itxn, rawKey, err := t.queryIndex(txn, index, key) + if err != nil { + return nil, err + } + iter, _ := itxn.Prefix(rawKey) + if itxn.unique { + return partSeq[any](iter), nil + } + return nonUniqueSeq[any](iter, true, rawKey), nil +} + +func (t AnyTable) LowerBound(txn ReadTxn, index string, key string) (iter.Seq2[any, Revision], error) { + itxn, rawKey, err := t.queryIndex(txn, index, key) + if err != nil { + return nil, err + } + iter := itxn.LowerBound(rawKey) + if itxn.unique { + return partSeq[any](iter), nil + } + return nonUniqueLowerBoundSeq[any](iter, rawKey), nil +} + +func (t AnyTable) List(txn ReadTxn, index string, key string) (iter.Seq2[any, Revision], error) { + itxn, rawKey, err := t.queryIndex(txn, index, key) + if err != nil { + return nil, err + } + iter, _ := itxn.Prefix(rawKey) + if itxn.unique { + // Unique index means that there can be only a single matching object. + // Doing a Get() is more efficient than constructing an iterator. 
+ value, _, ok := itxn.Get(rawKey) + return func(yield func(any, Revision) bool) { + if ok { + yield(value.data, value.revision) + } + }, nil + } + return nonUniqueSeq[any](iter, false, rawKey), nil +} + +func (t AnyTable) queryIndex(txn ReadTxn, index string, key string) (indexReadTxn, []byte, error) { + indexer := t.Meta.getIndexer(index) + if indexer == nil { + return indexReadTxn{}, nil, fmt.Errorf("invalid index %q", index) + } + rawKey, err := indexer.fromString(key) + if err != nil { + return indexReadTxn{}, nil, err + } + itxn, err := txn.getTxn().indexReadTxn(t.Meta, indexer.pos) + return itxn, rawKey, err +} + +func (t AnyTable) Changes(txn WriteTxn) (anyChangeIterator, error) { + return t.Meta.anyChanges(txn) +} + +func (t AnyTable) TableHeader() []string { + zero := t.Meta.proto() + if tw, ok := zero.(TableWritable); ok { + return tw.TableHeader() + } + return nil +} + +func (t AnyTable) Proto() any { + return t.Meta.proto() +} diff --git a/vendor/github.com/cilium/statedb/cell.go b/vendor/github.com/cilium/statedb/cell.go index 38b0658777..9a23395074 100644 --- a/vendor/github.com/cilium/statedb/cell.go +++ b/vendor/github.com/cilium/statedb/cell.go @@ -16,6 +16,7 @@ var Cell = cell.Module( cell.Provide( newHiveDB, + ScriptCommands, ), ) diff --git a/vendor/github.com/cilium/statedb/db.go b/vendor/github.com/cilium/statedb/db.go index 2ec4d62a93..be6f18ec41 100644 --- a/vendor/github.com/cilium/statedb/db.go +++ b/vendor/github.com/cilium/statedb/db.go @@ -208,9 +208,20 @@ func (db *DB) WriteTxn(table TableMeta, tables ...TableMeta) WriteTxn { lockAt := time.Now() smus.Lock() acquiredAt := time.Now() - root := *db.root.Load() tableEntries := make([]*tableEntry, len(root)) + + txn := &txn{ + db: db, + root: root, + handle: db.handleName, + acquiredAt: time.Now(), + writeTxn: writeTxn{ + modifiedTables: tableEntries, + smus: smus, + }, + } + var tableNames []string for _, table := range allTables { tableEntry := root[table.tablePos()] @@ -223,10 +234,12 @@ func (db *DB) WriteTxn(table TableMeta, tables ...TableMeta) WriteTxn { table.Name(), table.sortableMutex().AcquireDuration(), ) + table.acquired(txn) } // Sort the table names so they always appear ordered in metrics. sort.Strings(tableNames) + txn.tableNames = tableNames db.metrics.WriteTxnTotalAcquisition( db.handleName, @@ -234,19 +247,29 @@ func (db *DB) WriteTxn(table TableMeta, tables ...TableMeta) WriteTxn { acquiredAt.Sub(lockAt), ) - txn := &txn{ - db: db, - root: root, - modifiedTables: tableEntries, - smus: smus, - acquiredAt: acquiredAt, - tableNames: tableNames, - handle: db.handleName, - } runtime.SetFinalizer(txn, txnFinalizer) return txn } +func (db *DB) GetTables(txn ReadTxn) (tbls []TableMeta) { + root := txn.getTxn().root + tbls = make([]TableMeta, 0, len(root)) + for _, table := range root { + tbls = append(tbls, table.meta) + } + return +} + +func (db *DB) GetTable(txn ReadTxn, name string) TableMeta { + root := txn.getTxn().root + for _, table := range root { + if table.meta.Name() == name { + return table.meta + } + } + return nil +} + // Start the background workers for the database. 
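// (Editor's sketch, illustrative only: GetTable above combined with AnyTable
// gives string-keyed, generically typed access to a table. Assuming a table
// named "devices" with a "name" index exists, and using the usual ReadTxn
// accessor on DB:
//
//	txn := db.ReadTxn()
//	if meta := db.GetTable(txn, "devices"); meta != nil {
//		tbl := AnyTable{Meta: meta}
//		obj, rev, found, _ := tbl.Get(txn, "name", "eth0")
//		fmt.Println(obj, rev, found)
//	}
//
// This is the kind of access the new "db" script commands below build on.)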
// // This starts the graveyard worker that deals with garbage collecting diff --git a/vendor/github.com/cilium/statedb/http.go b/vendor/github.com/cilium/statedb/http.go index 37b01019c5..96b1a651fd 100644 --- a/vendor/github.com/cilium/statedb/http.go +++ b/vendor/github.com/cilium/statedb/http.go @@ -137,7 +137,7 @@ func runQuery(indexTxn indexReadTxn, lowerbound bool, queryKey []byte, onObject match = func(k []byte) bool { return len(k) == len(queryKey) } default: match = func(k []byte) bool { - _, secondary := decodeNonUniqueKey(k) + secondary, _ := decodeNonUniqueKey(k) return len(secondary) == len(queryKey) } } diff --git a/vendor/github.com/cilium/statedb/index/bool.go b/vendor/github.com/cilium/statedb/index/bool.go index c60f7b9e61..8e37371542 100644 --- a/vendor/github.com/cilium/statedb/index/bool.go +++ b/vendor/github.com/cilium/statedb/index/bool.go @@ -3,6 +3,8 @@ package index +import "strconv" + var ( trueKey = []byte{'T'} falseKey = []byte{'F'} @@ -14,3 +16,8 @@ func Bool(b bool) Key { } return falseKey } + +func BoolString(s string) (Key, error) { + b, err := strconv.ParseBool(s) + return Bool(b), err +} diff --git a/vendor/github.com/cilium/statedb/index/int.go b/vendor/github.com/cilium/statedb/index/int.go index 0b285c2638..caf26d8a88 100644 --- a/vendor/github.com/cilium/statedb/index/int.go +++ b/vendor/github.com/cilium/statedb/index/int.go @@ -5,6 +5,7 @@ package index import ( "encoding/binary" + "strconv" ) // The indexing functions on integers should use big-endian encoding. @@ -19,26 +20,78 @@ func Int(n int) Key { return Int32(int32(n)) } +func IntString(s string) (Key, error) { + return Int32String(s) +} + func Int64(n int64) Key { return Uint64(uint64(n)) } +func Int64String(s string) (Key, error) { + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return Key{}, err + } + return Uint64(uint64(n)), nil +} + func Int32(n int32) Key { return Uint32(uint32(n)) } +func Int32String(s string) (Key, error) { + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return Key{}, err + } + return Uint32(uint32(n)), nil +} + func Int16(n int16) Key { return Uint16(uint16(n)) } +func Int16String(s string) (Key, error) { + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + return Key{}, err + } + return Uint16(uint16(n)), nil +} + func Uint64(n uint64) Key { return binary.BigEndian.AppendUint64(nil, n) } +func Uint64String(s string) (Key, error) { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return Key{}, err + } + return Uint64(n), nil +} + func Uint32(n uint32) Key { return binary.BigEndian.AppendUint32(nil, n) } +func Uint32String(s string) (Key, error) { + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return Key{}, err + } + return Uint32(uint32(n)), nil +} + func Uint16(n uint16) Key { return binary.BigEndian.AppendUint16(nil, n) } + +func Uint16String(s string) (Key, error) { + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + return Key{}, err + } + return Uint16(uint16(n)), nil +} diff --git a/vendor/github.com/cilium/statedb/index/netip.go b/vendor/github.com/cilium/statedb/index/netip.go index b8223d0554..7a95f63469 100644 --- a/vendor/github.com/cilium/statedb/index/netip.go +++ b/vendor/github.com/cilium/statedb/index/netip.go @@ -20,8 +20,24 @@ func NetIPAddr(addr netip.Addr) Key { return buf[:] } +func NetIPAddrString(s string) (Key, error) { + addr, err := netip.ParseAddr(s) + if err != nil { + return Key{}, err + } + return NetIPAddr(addr), nil +} + func NetIPPrefix(prefix netip.Prefix) Key { // 
Use the 16-byte form plus bits to have a constant-size key. addrBytes := prefix.Addr().As16() return append(addrBytes[:], uint8(prefix.Bits())) } + +func NetIPPrefixString(s string) (Key, error) { + prefix, err := netip.ParsePrefix(s) + if err != nil { + return Key{}, err + } + return NetIPPrefix(prefix), nil +} diff --git a/vendor/github.com/cilium/statedb/index/string.go b/vendor/github.com/cilium/statedb/index/string.go index 9a678f0ca2..99430bc3bd 100644 --- a/vendor/github.com/cilium/statedb/index/string.go +++ b/vendor/github.com/cilium/statedb/index/string.go @@ -12,6 +12,10 @@ func String(s string) Key { return []byte(s) } +func FromString(s string) (Key, error) { + return String(s), nil +} + func Stringer[T fmt.Stringer](s T) Key { return String(s.String()) } diff --git a/vendor/github.com/cilium/statedb/internal/time.go b/vendor/github.com/cilium/statedb/internal/time.go new file mode 100644 index 0000000000..5463ea0e4e --- /dev/null +++ b/vendor/github.com/cilium/statedb/internal/time.go @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package internal + +import ( + "fmt" + "time" +) + +func PrettySince(t time.Time) string { + return PrettyDuration(time.Since(t)) +} + +func PrettyDuration(d time.Duration) string { + ago := float64(d) / float64(time.Microsecond) + + // micros + if ago < 1000.0 { + return fmt.Sprintf("%.1fus", ago) + } + + // millis + ago /= 1000.0 + if ago < 1000.0 { + return fmt.Sprintf("%.1fms", ago) + } + // secs + ago /= 1000.0 + if ago < 60.0 { + return fmt.Sprintf("%.1fs", ago) + } + // mins + ago /= 60.0 + if ago < 60.0 { + return fmt.Sprintf("%.1fm", ago) + } + // hours + ago /= 60.0 + return fmt.Sprintf("%.1fh", ago) +} diff --git a/vendor/github.com/cilium/statedb/iterator.go b/vendor/github.com/cilium/statedb/iterator.go index 08b4e79042..301ce330d5 100644 --- a/vendor/github.com/cilium/statedb/iterator.go +++ b/vendor/github.com/cilium/statedb/iterator.go @@ -4,6 +4,7 @@ package statedb import ( + "bytes" "fmt" "iter" "slices" @@ -53,7 +54,7 @@ func ToSeq[A, B any](seq iter.Seq2[A, B]) iter.Seq[A] { } } -// partSeq returns a sequence of objects from a part Iterator. +// partSeq returns a casted sequence of objects from a part Iterator. func partSeq[Obj any](iter *part.Iterator[object]) iter.Seq2[Obj, Revision] { return func(yield func(Obj, Revision) bool) { // Iterate over a clone of the original iterator to allow the sequence to be iterated @@ -75,7 +76,7 @@ func partSeq[Obj any](iter *part.Iterator[object]) iter.Seq2[Obj, Revision] { // Non-unique indexes work by concatenating the secondary key with the // primary key and then prefix searching for the items: // -// +// \0 // ^^^^^^^^^^^ // // Since the primary key can be of any length and we're prefix searching, @@ -84,27 +85,53 @@ func partSeq[Obj any](iter *part.Iterator[object]) iter.Seq2[Obj, Revision] { // For example if we search for the key "aaaa", then we might have the following // matches (_ is just delimiting, not part of the key): // -// aaaa_bbb4 -// aaa_abab3 -// aaaa_ccc4 +// aaaa\0bbb4 +// aaa\0abab3 +// aaaa\0ccc4 // -// We yield "aaaa_bbb4", skip "aaa_abab3" and yield "aaaa_ccc4". -func nonUniqueSeq[Obj any](iter *part.Iterator[object], searchKey []byte) iter.Seq2[Obj, Revision] { +// We yield "aaaa\0bbb4", skip "aaa\0abab3" and yield "aaaa\0ccc4". 
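// (Editor's aside, illustrative and not part of the vendored file: stepping
// back to the index package additions above, the new *String helpers parse
// plain strings into index keys, e.g.
//
//	key, err := index.NetIPAddrString("10.0.0.1") // same bytes as index.NetIPAddr(netip.MustParseAddr("10.0.0.1"))
//	k2, _ := index.Uint64String("42")             // big-endian 8-byte encoding of 42
//
// which is how string arguments can be turned into raw index keys, as
// AnyTable.queryIndex does via indexer.fromString.)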
+func nonUniqueSeq[Obj any](iter *part.Iterator[object], prefixSearch bool, searchKey []byte) iter.Seq2[Obj, Revision] { return func(yield func(Obj, Revision) bool) { // Clone the iterator to allow multiple iterations over the sequence. it := iter.Clone() + + var visited map[string]struct{} + if prefixSearch { + // When prefix searching, keep track of objects we've already seen as + // multiple keys in non-unique index may map to a single object. + // When just doing a List() on a non-unique index we will see each object + // only once and do not need to track this. + // + // This of course makes iterating over a non-unique index with a prefix + // (or lowerbound search) about 20x slower than normal! + visited = map[string]struct{}{} + } + for { key, iobj, ok := it.Next() if !ok { break } - _, secondary := decodeNonUniqueKey(key) + secondary, primary := decodeNonUniqueKey(key) - // The secondary key doesn't match the search key. Since the primary - // key length can vary, we need to continue the prefix search. - if len(secondary) != len(searchKey) { + switch { + case !prefixSearch && len(secondary) != len(searchKey): + // This a List(), thus secondary key must match length exactly. continue + case prefixSearch && len(secondary) < len(searchKey): + // This is Prefix(), thus key must be equal or longer to search key. + continue + } + + if prefixSearch { + // When doing a prefix search on a non-unique index we may see the + // same object multiple times since multiple keys may point it. + // Skip if we've already seen this object. + if _, found := visited[string(primary)]; found { + continue + } + visited[string(primary)] = struct{}{} } if !yield(iobj.data.(Obj), iobj.revision) { @@ -114,6 +141,37 @@ func nonUniqueSeq[Obj any](iter *part.Iterator[object], searchKey []byte) iter.S } } +func nonUniqueLowerBoundSeq[Obj any](iter *part.Iterator[object], searchKey []byte) iter.Seq2[Obj, Revision] { + return func(yield func(Obj, Revision) bool) { + // Clone the iterator to allow multiple uses. + iter = iter.Clone() + + // Keep track of objects we've already seen as multiple keys in non-unique + // index may map to a single object. + visited := map[string]struct{}{} + for { + key, iobj, ok := iter.Next() + if !ok { + break + } + // With a non-unique index we have a composite key . + // This means we need to check every key that it's larger or equal to the search key. + // Just seeking to the first one isn't enough as the secondary key length may vary. + secondary, primary := decodeNonUniqueKey(key) + if bytes.Compare(secondary, searchKey) >= 0 { + if _, found := visited[string(primary)]; found { + continue + } + visited[string(primary)] = struct{}{} + + if !yield(iobj.data.(Obj), iobj.revision) { + return + } + } + } + } +} + // iterator adapts the "any" object iterator to a typed object. type iterator[Obj any] struct { iter interface{ Next() ([]byte, object, bool) } @@ -128,6 +186,13 @@ func (it *iterator[Obj]) Next() (obj Obj, revision uint64, ok bool) { return } +// Iterator for iterating a sequence objects. +type Iterator[Obj any] interface { + // Next returns the next object and its revision if ok is true, otherwise + // zero values to mean that the iteration has finished. 
+ Next() (obj Obj, rev Revision, ok bool) +} + func NewDualIterator[Obj any](left, right Iterator[Obj]) *DualIterator[Obj] { return &DualIterator[Obj]{ left: iterState[Obj]{iter: left}, diff --git a/vendor/github.com/cilium/statedb/part/map.go b/vendor/github.com/cilium/statedb/part/map.go index 9548a147a7..70d0b070fa 100644 --- a/vendor/github.com/cilium/statedb/part/map.go +++ b/vendor/github.com/cilium/statedb/part/map.go @@ -9,6 +9,8 @@ import ( "fmt" "iter" "reflect" + + "gopkg.in/yaml.v3" ) // Map of key-value pairs. The zero value is ready for use, provided @@ -22,8 +24,8 @@ type Map[K, V any] struct { } type mapKVPair[K, V any] struct { - Key K `json:"k"` - Value V `json:"v"` + Key K `json:"k" yaml:"k"` + Value V `json:"v" yaml:"v"` } // FromMap copies values from the hash map into the given Map. @@ -238,3 +240,29 @@ func (m *Map[K, V]) UnmarshalJSON(data []byte) error { m.tree = txn.CommitOnly() return nil } + +func (m Map[K, V]) MarshalYAML() (any, error) { + kvs := make([]mapKVPair[K, V], 0, m.Len()) + iter := m.tree.Iterator() + for _, kv, ok := iter.Next(); ok; _, kv, ok = iter.Next() { + kvs = append(kvs, kv) + } + return kvs, nil +} + +func (m *Map[K, V]) UnmarshalYAML(value *yaml.Node) error { + if value.Kind != yaml.SequenceNode { + return fmt.Errorf("%T.UnmarshalYAML: expected sequence", m) + } + m.ensureTree() + txn := m.tree.Txn() + for _, e := range value.Content { + var kv mapKVPair[K, V] + if err := e.Decode(&kv); err != nil { + return err + } + txn.Insert(m.bytesFromKey(kv.Key), mapKVPair[K, V]{kv.Key, kv.Value}) + } + m.tree = txn.CommitOnly() + return nil +} diff --git a/vendor/github.com/cilium/statedb/part/set.go b/vendor/github.com/cilium/statedb/part/set.go index 8c677bcda6..89a91f0a67 100644 --- a/vendor/github.com/cilium/statedb/part/set.go +++ b/vendor/github.com/cilium/statedb/part/set.go @@ -8,6 +8,9 @@ import ( "encoding/json" "fmt" "iter" + "slices" + + "gopkg.in/yaml.v3" ) // Set is a persistent (immutable) set of values. A Set can be @@ -208,6 +211,32 @@ func (s *Set[T]) UnmarshalJSON(data []byte) error { return nil } +func (s Set[T]) MarshalYAML() (any, error) { + // TODO: Once yaml.v3 supports iter.Seq, drop the Collect(). 
+ return slices.Collect(s.All()), nil +} + +func (s *Set[T]) UnmarshalYAML(value *yaml.Node) error { + if value.Kind != yaml.SequenceNode { + return fmt.Errorf("%T.UnmarshalYAML: expected sequence", s) + } + + if s.tree == nil { + *s = NewSet[T]() + } + txn := s.tree.Txn() + + for _, e := range value.Content { + var v T + if err := e.Decode(&v); err != nil { + return err + } + txn.Insert(s.toBytes(v), v) + } + s.tree = txn.CommitOnly() + return nil +} + func toSeq[T any](iter *Iterator[T]) iter.Seq[T] { return func(yield func(T) bool) { if iter == nil { diff --git a/vendor/github.com/cilium/statedb/reconciler/metrics.go b/vendor/github.com/cilium/statedb/reconciler/metrics.go index f30984a8ee..e53b3b1003 100644 --- a/vendor/github.com/cilium/statedb/reconciler/metrics.go +++ b/vendor/github.com/cilium/statedb/reconciler/metrics.go @@ -24,6 +24,8 @@ const ( ) type ExpVarMetrics struct { + root *expvar.Map + ReconciliationCountVar *expvar.Map ReconciliationDurationVar *expvar.Map ReconciliationTotalErrorsVar *expvar.Map @@ -73,14 +75,22 @@ func NewUnpublishedExpVarMetrics() *ExpVarMetrics { return newExpVarMetrics(false) } +func (m *ExpVarMetrics) Map() *expvar.Map { + return m.root +} + func newExpVarMetrics(publish bool) *ExpVarMetrics { + root := new(expvar.Map).Init() newMap := func(name string) *expvar.Map { if publish { return expvar.NewMap(name) } - return new(expvar.Map).Init() + m := new(expvar.Map).Init() + root.Set(name, m) + return m } return &ExpVarMetrics{ + root: root, ReconciliationCountVar: newMap("reconciliation_count"), ReconciliationDurationVar: newMap("reconciliation_duration"), ReconciliationTotalErrorsVar: newMap("reconciliation_total_errors"), diff --git a/vendor/github.com/cilium/statedb/reconciler/types.go b/vendor/github.com/cilium/statedb/reconciler/types.go index aa69beb285..01b9fa8224 100644 --- a/vendor/github.com/cilium/statedb/reconciler/types.go +++ b/vendor/github.com/cilium/statedb/reconciler/types.go @@ -19,6 +19,8 @@ import ( "github.com/cilium/hive/job" "github.com/cilium/statedb" "github.com/cilium/statedb/index" + "github.com/cilium/statedb/internal" + "gopkg.in/yaml.v3" ) type Reconciler[Obj any] interface { @@ -142,36 +144,48 @@ type Status struct { id uint64 } -func (s Status) IsPendingOrRefreshing() bool { - return s.Kind == StatusKindPending || s.Kind == StatusKindRefreshing +// statusJSON defines the JSON/YAML format for [Status]. Separate to +// [Status] to allow custom unmarshalling that fills in [id]. 
+type statusJSON struct { + Kind string `json:"kind" yaml:"kind"` + UpdatedAt time.Time `json:"updated-at" yaml:"updated-at"` + Error string `json:"error,omitempty" yaml:"error,omitempty"` } -func (s Status) String() string { - if s.Kind == StatusKindError { - return fmt.Sprintf("Error: %s (%s ago)", s.Error, prettySince(s.UpdatedAt)) - } - return fmt.Sprintf("%s (%s ago)", s.Kind, prettySince(s.UpdatedAt)) +func (sj *statusJSON) fill(s *Status) { + s.Kind = StatusKind(sj.Kind) + s.UpdatedAt = sj.UpdatedAt + s.Error = sj.Error + s.id = nextID() } -func prettySince(t time.Time) string { - ago := float64(time.Now().Sub(t)) / float64(time.Millisecond) - // millis - if ago < 1000.0 { - return fmt.Sprintf("%.1fms", ago) +func (s *Status) UnmarshalYAML(value *yaml.Node) error { + var sj statusJSON + if err := value.Decode(&sj); err != nil { + return err } - // secs - ago /= 1000.0 - if ago < 60.0 { - return fmt.Sprintf("%.1fs", ago) + sj.fill(s) + return nil +} + +func (s *Status) UnmarshalJSON(data []byte) error { + var sj statusJSON + if err := json.Unmarshal(data, &sj); err != nil { + return err } - // mins - ago /= 60.0 - if ago < 60.0 { - return fmt.Sprintf("%.1fm", ago) + sj.fill(s) + return nil +} + +func (s Status) IsPendingOrRefreshing() bool { + return s.Kind == StatusKindPending || s.Kind == StatusKindRefreshing +} + +func (s Status) String() string { + if s.Kind == StatusKindError { + return fmt.Sprintf("Error: %s (%s ago)", s.Error, internal.PrettySince(s.UpdatedAt)) } - // hours - ago /= 60.0 - return fmt.Sprintf("%.1fh", ago) + return fmt.Sprintf("%s (%s ago)", s.Kind, internal.PrettySince(s.UpdatedAt)) } var idGen atomic.Uint64 @@ -206,6 +220,7 @@ func StatusRefreshing() Status { Kind: StatusKindRefreshing, UpdatedAt: time.Now(), Error: "", + id: nextID(), } } @@ -216,6 +231,7 @@ func StatusDone() Status { Kind: StatusKindDone, UpdatedAt: time.Now(), Error: "", + id: nextID(), } } @@ -226,6 +242,7 @@ func StatusError(err error) Status { Kind: StatusKindError, UpdatedAt: time.Now(), Error: err.Error(), + id: nextID(), } } @@ -314,7 +331,7 @@ func (s StatusSet) String() string { b.WriteString(strings.Join(done, " ")) } b.WriteString(" (") - b.WriteString(prettySince(updatedAt)) + b.WriteString(internal.PrettySince(updatedAt)) b.WriteString(" ago)") return b.String() } diff --git a/vendor/github.com/cilium/statedb/script.go b/vendor/github.com/cilium/statedb/script.go new file mode 100644 index 0000000000..88488a8c20 --- /dev/null +++ b/vendor/github.com/cilium/statedb/script.go @@ -0,0 +1,836 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package statedb + +import ( + "bytes" + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "iter" + "maps" + "os" + "regexp" + "slices" + "strings" + "time" + + "github.com/cilium/hive" + "github.com/cilium/hive/script" + "github.com/liggitt/tabwriter" + "golang.org/x/time/rate" + "gopkg.in/yaml.v3" +) + +func ScriptCommands(db *DB) hive.ScriptCmdOut { + subCmds := map[string]script.Cmd{ + "tables": TablesCmd(db), + "show": ShowCmd(db), + "cmp": CompareCmd(db), + "insert": InsertCmd(db), + "delete": DeleteCmd(db), + "get": GetCmd(db), + "prefix": PrefixCmd(db), + "list": ListCmd(db), + "lowerbound": LowerBoundCmd(db), + "watch": WatchCmd(db), + "initialized": InitializedCmd(db), + } + subCmdsList := strings.Join(slices.Collect(maps.Keys(subCmds)), ", ") + return hive.NewScriptCmd( + "db", + script.Command( + script.CmdUsage{ + Summary: "Inspect and manipulate StateDB", + Args: "cmd args...", + Detail: []string{ + 
"Supported commands: " + subCmdsList, + }, + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + if len(args) < 1 { + return nil, fmt.Errorf("expected command (%s)", subCmdsList) + } + cmd, ok := subCmds[args[0]] + if !ok { + return nil, fmt.Errorf("command not found, expected one of %s", subCmdsList) + } + wf, err := cmd.Run(s, args[1:]...) + if errors.Is(err, errUsage) { + s.Logf("usage: db %s %s\n", args[0], cmd.Usage().Args) + } + return wf, err + }, + ), + ) +} + +var errUsage = errors.New("bad arguments") + +func TablesCmd(db *DB) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "Show StateDB tables", + Args: "table", + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + txn := db.ReadTxn() + tbls := db.GetTables(txn) + w := newTabWriter(s.LogWriter()) + fmt.Fprintf(w, "Name\tObject count\tDeleted objects\tIndexes\tInitializers\tGo type\tLast WriteTxn\n") + for _, tbl := range tbls { + idxs := strings.Join(tbl.Indexes(), ", ") + fmt.Fprintf(w, "%s\t%d\t%d\t%s\t%v\t%T\t%s\n", + tbl.Name(), tbl.NumObjects(txn), tbl.numDeletedObjects(txn), idxs, tbl.PendingInitializers(txn), tbl.proto(), tbl.getAcquiredInfo()) + } + w.Flush() + return nil, nil + }, + ) +} + +func newCmdFlagSet() *flag.FlagSet { + return &flag.FlagSet{ + // Disable showing the normal usage. + Usage: func() {}, + } +} + +func InitializedCmd(db *DB) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "Wait until all or specific tables have been initialized", + Args: "(-timeout=) table...", + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + txn := db.ReadTxn() + allTbls := db.GetTables(txn) + tbls := allTbls + + flags := newCmdFlagSet() + timeout := flags.Duration("timeout", 5*time.Second, "Maximum amount of time to wait for the table contents to match") + if err := flags.Parse(args); err != nil { + return nil, fmt.Errorf("%w: %s", errUsage, err) + } + timeoutChan := time.After(*timeout) + args = flags.Args() + + if len(args) > 0 { + // Specific tables requested, look them up. + tbls = make([]TableMeta, 0, len(args)) + for _, tableName := range args { + found := false + for _, tbl := range allTbls { + if tableName == tbl.Name() { + tbls = append(tbls, tbl) + found = true + break + } + } + if !found { + return nil, fmt.Errorf("table %q not found", tableName) + } + } + } + + for _, tbl := range tbls { + init, watch := tbl.Initialized(txn) + if init { + s.Logf("%s initialized\n", tbl.Name()) + continue + } + s.Logf("Waiting for %s to initialize (%v)...\n", tbl.Name(), tbl.PendingInitializers(txn)) + select { + case <-s.Context().Done(): + return nil, s.Context().Err() + case <-timeoutChan: + return nil, fmt.Errorf("timed out") + case <-watch: + s.Logf("%s initialized\n", tbl.Name()) + } + } + return nil, nil + }, + ) +} + +func ShowCmd(db *DB) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "Show table", + Args: "(-o=) (-columns=col1,...) 
(-format={table,yaml,json}) table", + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + flags := newCmdFlagSet() + file := flags.String("o", "", "File to write to instead of stdout") + columns := flags.String("columns", "", "Comma-separated list of columns to write") + format := flags.String("format", "table", "Format to write in (table, yaml, json)") + if err := flags.Parse(args); err != nil { + return nil, fmt.Errorf("%w: %s", errUsage, err) + } + + var cols []string + if len(*columns) > 0 { + cols = strings.Split(*columns, ",") + } + + args = flags.Args() + if len(args) < 1 { + return nil, fmt.Errorf("%w: missing table name", errUsage) + } + tableName := args[0] + return func(*script.State) (stdout, stderr string, err error) { + var buf strings.Builder + var w io.Writer + if *file == "" { + w = &buf + } else { + f, err := os.OpenFile(s.Path(*file), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) + if err != nil { + return "", "", fmt.Errorf("OpenFile(%s): %w", *file, err) + } + defer f.Close() + w = f + } + tbl, txn, err := getTable(db, tableName) + if err != nil { + return "", "", err + } + err = writeObjects(tbl, tbl.All(txn), w, cols, *format) + return buf.String(), "", err + }, nil + }) +} + +func CompareCmd(db *DB) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "Compare table", + Args: "(-timeout=) (-grep=) table file", + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + flags := newCmdFlagSet() + timeout := flags.Duration("timeout", time.Second, "Maximum amount of time to wait for the table contents to match") + grep := flags.String("grep", "", "Grep the result rows and only compare matching ones") + err := flags.Parse(args) + args = flags.Args() + if err != nil || len(args) != 2 { + return nil, fmt.Errorf("%w: %s", errUsage, err) + } + + var grepRe *regexp.Regexp + if *grep != "" { + grepRe, err = regexp.Compile(*grep) + if err != nil { + return nil, fmt.Errorf("bad grep: %w", err) + } + } + + tableName := args[0] + + txn := db.ReadTxn() + meta := db.GetTable(txn, tableName) + if meta == nil { + return nil, fmt.Errorf("table %q not found", tableName) + } + tbl := AnyTable{Meta: meta} + header := tbl.TableHeader() + + data, err := os.ReadFile(s.Path(args[1])) + if err != nil { + return nil, fmt.Errorf("ReadFile(%s): %w", args[1], err) + } + lines := strings.Split(string(data), "\n") + lines = slices.DeleteFunc(lines, func(line string) bool { + return strings.TrimSpace(line) == "" + }) + if len(lines) < 1 { + return nil, fmt.Errorf("%q missing header line, e.g. %q", args[1], strings.Join(header, " ")) + } + + columnNames, columnPositions := splitHeaderLine(lines[0]) + columnIndexes, err := getColumnIndexes(columnNames, header) + if err != nil { + return nil, err + } + lines = lines[1:] + origLines := lines + timeoutChan := time.After(*timeout) + + for { + lines = origLines + + // Create the diff between 'lines' and the rows in the table. 
+ equal := true + var diff bytes.Buffer + w := newTabWriter(&diff) + fmt.Fprintf(w, " %s\n", joinByPositions(columnNames, columnPositions)) + + objs, watch := tbl.AllWatch(db.ReadTxn()) + for obj := range objs { + rowRaw := takeColumns(obj.(TableWritable).TableRow(), columnIndexes) + row := joinByPositions(rowRaw, columnPositions) + if grepRe != nil && !grepRe.Match([]byte(row)) { + continue + } + + if len(lines) == 0 { + equal = false + fmt.Fprintf(w, "- %s\n", row) + continue + } + line := lines[0] + splitLine := splitByPositions(line, columnPositions) + + if slices.Equal(rowRaw, splitLine) { + fmt.Fprintf(w, " %s\n", row) + } else { + fmt.Fprintf(w, "- %s\n", row) + fmt.Fprintf(w, "+ %s\n", line) + equal = false + } + lines = lines[1:] + } + for _, line := range lines { + fmt.Fprintf(w, "+ %s\n", line) + equal = false + } + if equal { + return nil, nil + } + w.Flush() + + select { + case <-s.Context().Done(): + return nil, s.Context().Err() + + case <-timeoutChan: + return nil, fmt.Errorf("table mismatch:\n%s", diff.String()) + + case <-watch: + } + } + }) +} + +func InsertCmd(db *DB) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "Insert object into a table", + Args: "table path...", + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + return insertOrDelete(true, db, s, args...) + }, + ) +} + +func DeleteCmd(db *DB) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "Delete an object from the table", + Args: "table path...", + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + return insertOrDelete(false, db, s, args...) + }, + ) +} + +func getTable(db *DB, tableName string) (*AnyTable, ReadTxn, error) { + txn := db.ReadTxn() + meta := db.GetTable(txn, tableName) + if meta == nil { + return nil, nil, fmt.Errorf("table %q not found", tableName) + } + return &AnyTable{Meta: meta}, txn, nil +} + +func insertOrDelete(insert bool, db *DB, s *script.State, args ...string) (script.WaitFunc, error) { + if len(args) < 2 { + return nil, fmt.Errorf("%w: expected table and path(s)", errUsage) + } + + tbl, _, err := getTable(db, args[0]) + if err != nil { + return nil, err + } + + wtxn := db.WriteTxn(tbl.Meta) + defer wtxn.Commit() + + for _, arg := range args[1:] { + data, err := os.ReadFile(s.Path(arg)) + if err != nil { + return nil, fmt.Errorf("ReadFile(%s): %w", arg, err) + } + parts := strings.Split(string(data), "---") + for _, part := range parts { + obj, err := tbl.UnmarshalYAML([]byte(part)) + if err != nil { + return nil, fmt.Errorf("Unmarshal(%s): %w", arg, err) + } + if insert { + _, _, err = tbl.Insert(wtxn, obj) + if err != nil { + return nil, fmt.Errorf("Insert(%s): %w", arg, err) + } + } else { + _, _, err = tbl.Delete(wtxn, obj) + if err != nil { + return nil, fmt.Errorf("Delete(%s): %w", arg, err) + } + + } + } + } + return nil, nil +} + +func PrefixCmd(db *DB) script.Cmd { + return queryCmd(db, queryCmdPrefix, "Query table by prefix") +} + +func LowerBoundCmd(db *DB) script.Cmd { + return queryCmd(db, queryCmdLowerBound, "Query table by lower bound search") +} + +func ListCmd(db *DB) script.Cmd { + return queryCmd(db, queryCmdList, "List objects in the table") +} + +func GetCmd(db *DB) script.Cmd { + return queryCmd(db, queryCmdGet, "Get the first matching object") +} + +const ( + queryCmdList = iota + queryCmdPrefix + queryCmdLowerBound + queryCmdGet +) + +func queryCmd(db *DB, query int, summary string) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: summary, + Args: "(-o=) 
(-columns=col1,...) (-format={table*,yaml,json}) (-index=) table key", + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + return runQueryCmd(query, db, s, args) + }, + ) +} + +func runQueryCmd(query int, db *DB, s *script.State, args []string) (script.WaitFunc, error) { + flags := newCmdFlagSet() + file := flags.String("o", "", "File to write results to instead of stdout") + index := flags.String("index", "", "Index to query") + format := flags.String("format", "table", "Format to write in (table, yaml, json)") + columns := flags.String("columns", "", "Comma-separated list of columns to write") + delete := flags.Bool("delete", false, "Delete all matching objects") + if err := flags.Parse(args); err != nil { + return nil, fmt.Errorf("%w: %s", errUsage, err) + } + + var cols []string + if len(*columns) > 0 { + cols = strings.Split(*columns, ",") + } + + args = flags.Args() + if len(args) < 2 { + return nil, fmt.Errorf("%w: expected table and key", errUsage) + } + + return func(*script.State) (stdout, stderr string, err error) { + tbl, txn, err := getTable(db, args[0]) + if err != nil { + return "", "", err + } + + var buf strings.Builder + var w io.Writer + if *file == "" { + w = &buf + } else { + f, err := os.OpenFile(s.Path(*file), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) + if err != nil { + return "", "", fmt.Errorf("OpenFile(%s): %s", *file, err) + } + defer f.Close() + w = f + } + + var it iter.Seq2[any, uint64] + switch query { + case queryCmdList: + it, err = tbl.List(txn, *index, args[1]) + case queryCmdLowerBound: + it, err = tbl.LowerBound(txn, *index, args[1]) + case queryCmdPrefix: + it, err = tbl.Prefix(txn, *index, args[1]) + case queryCmdGet: + it, err = tbl.List(txn, *index, args[1]) + if err == nil { + it = firstOfSeq2(it) + } + default: + panic("unknown query enum") + } + if err != nil { + return "", "", fmt.Errorf("query: %w", err) + } + + err = writeObjects(tbl, it, w, cols, *format) + if err != nil { + return "", "", err + } + + if *delete { + wtxn := db.WriteTxn(tbl.Meta) + count := 0 + for obj := range it { + _, hadOld, err := tbl.Delete(wtxn, obj) + if err != nil { + wtxn.Abort() + return "", "", err + } + if hadOld { + count++ + } + } + s.Logf("Deleted %d objects\n", count) + wtxn.Commit() + } + + return buf.String(), "", err + }, nil +} + +func WatchCmd(db *DB) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "Watch a table for changes", + Args: "table", + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + if len(args) < 1 { + return nil, fmt.Errorf("expected table name") + } + + tbl, _, err := getTable(db, args[0]) + if err != nil { + return nil, err + } + wtxn := db.WriteTxn(tbl.Meta) + iter, err := tbl.Changes(wtxn) + wtxn.Commit() + if err != nil { + return nil, err + } + + header := tbl.TableHeader() + if header == nil { + return nil, fmt.Errorf("objects in table %q not TableWritable", tbl.Meta.Name()) + } + tw := newTabWriter(&strikethroughWriter{w: s.LogWriter()}) + fmt.Fprintf(tw, "%s\n", strings.Join(header, "\t")) + + limiter := rate.NewLimiter(10.0, 1) + for { + if err := limiter.Wait(s.Context()); err != nil { + break + } + changes, watch := iter.nextAny(db.ReadTxn()) + for change := range changes { + row := change.Object.(TableWritable).TableRow() + if change.Deleted { + fmt.Fprintf(tw, "%s (deleted)%s", strings.Join(row, "\t"), magicStrikethroughNewline) + } else { + fmt.Fprintf(tw, "%s\n", strings.Join(row, "\t")) + } + } + tw.Flush() + if err := s.FlushLog(); err != nil { + return nil, 
err + } + select { + case <-watch: + case <-s.Context().Done(): + return nil, nil + } + } + return nil, nil + + }, + ) +} + +func firstOfSeq2[A, B any](it iter.Seq2[A, B]) iter.Seq2[A, B] { + return func(yield func(a A, b B) bool) { + for a, b := range it { + yield(a, b) + break + } + } +} + +func writeObjects(tbl *AnyTable, it iter.Seq2[any, Revision], w io.Writer, columns []string, format string) error { + if len(columns) > 0 && format != "table" { + return fmt.Errorf("-columns not supported with non-table formats") + } + switch format { + case "yaml": + sep := []byte("---\n") + first := true + for obj := range it { + if !first { + w.Write(sep) + } + first = false + + out, err := yaml.Marshal(obj) + if err != nil { + return fmt.Errorf("yaml.Marshal: %w", err) + } + if _, err := w.Write(out); err != nil { + return err + } + } + return nil + case "json": + sep := []byte("\n") + first := true + for obj := range it { + if !first { + w.Write(sep) + } + first = false + + out, err := json.Marshal(obj) + if err != nil { + return fmt.Errorf("json.Marshal: %w", err) + } + if _, err := w.Write(out); err != nil { + return err + } + } + return nil + case "table": + header := tbl.TableHeader() + if header == nil { + return fmt.Errorf("objects in table %q not TableWritable", tbl.Meta.Name()) + } + + var idxs []int + var err error + if len(columns) > 0 { + idxs, err = getColumnIndexes(columns, header) + header = columns + } else { + idxs, err = getColumnIndexes(header, header) + } + if err != nil { + return err + } + tw := newTabWriter(w) + fmt.Fprintf(tw, "%s\n", strings.Join(header, "\t")) + + for obj := range it { + row := takeColumns(obj.(TableWritable).TableRow(), idxs) + fmt.Fprintf(tw, "%s\n", strings.Join(row, "\t")) + } + return tw.Flush() + } + return fmt.Errorf("unknown format %q, expected table, yaml or json", format) +} + +func takeColumns[T any](xs []T, idxs []int) (out []T) { + for _, idx := range idxs { + out = append(out, xs[idx]) + } + return +} + +func getColumnIndexes(names []string, header []string) ([]int, error) { + columnIndexes := make([]int, 0, len(header)) +loop: + for _, name := range names { + for i, name2 := range header { + if strings.EqualFold(name, name2) { + columnIndexes = append(columnIndexes, i) + continue loop + } + } + return nil, fmt.Errorf("column %q not part of %v", name, header) + } + return columnIndexes, nil +} + +// splitHeaderLine takes a header of column names separated by any +// number of whitespaces and returns the names and their starting positions. +// e.g. "Foo Bar Baz" would result in ([Foo,Bar,Baz],[0,5,9]). +// With this information we can take a row in the database and format it +// the same way as our test data. +func splitHeaderLine(line string) (names []string, pos []int) { + start := 0 + skip := true + for i, r := range line { + switch r { + case ' ', '\t': + if !skip { + names = append(names, line[start:i]) + pos = append(pos, start) + start = -1 + } + skip = true + default: + skip = false + if start == -1 { + start = i + } + } + } + if start >= 0 && start < len(line) { + names = append(names, line[start:]) + pos = append(pos, start) + } + return +} + +// splitByPositions takes a "row" line and the positions of the header columns +// and extracts the values. +// e.g. if we have the positions [0,5,9] (from header "Foo Bar Baz") and +// line is "1 a b", then we'd extract [1,a,b]. +// The whitespace on the right of the start position (e.g. "1 \t") is trimmed. 
+// This of course requires that the table is properly formatted in a way that the +// header columns are indented to fit the data exactly. +func splitByPositions(line string, positions []int) []string { + out := make([]string, 0, len(positions)) + start := 0 + for _, pos := range positions[1:] { + if start >= len(line) { + out = append(out, "") + start = len(line) + continue + } + out = append(out, strings.TrimRight(line[start:min(pos, len(line))], " \t")) + start = pos + } + out = append(out, strings.TrimRight(line[min(start, len(line)):], " \t")) + return out +} + +// joinByPositions is the reverse of splitByPositions, it takes the columns of a +// row and the starting positions of each and joins into a single line. +// e.g. [1,a,b] and positions [0,5,9] expands to "1 a b". +// NOTE: This does not deal well with mixing tabs and spaces. The test input +// data should preferably just use spaces. +func joinByPositions(row []string, positions []int) string { + var w strings.Builder + prev := 0 + for i, pos := range positions { + for pad := pos - prev; pad > 0; pad-- { + w.WriteByte(' ') + } + w.WriteString(row[i]) + prev = pos + len(row[i]) + } + return w.String() +} + +// strikethroughWriter writes a line of text that is striken through +// if the line contains the magic character at the end before \n. +// This is used to strike through a tab-formatted line without messing +// up with the widths of the cells. +type strikethroughWriter struct { + buf []byte + strikethrough bool + w io.Writer +} + +var ( + // Magic character to use at the end of the line to denote that this should be + // striken through. + // This is to avoid messing up the width calculations in the tab writer, which + // would happen if ANSI codes were used directly. + magicStrikethrough = byte('\xfe') + magicStrikethroughNewline = "\xfe\n" +) + +func stripTrailingWhitespace(buf []byte) []byte { + idx := bytes.LastIndexFunc( + buf, + func(r rune) bool { + return r != ' ' && r != '\t' + }, + ) + if idx > 0 { + return buf[:idx+1] + } + return buf +} + +func (s *strikethroughWriter) Write(p []byte) (n int, err error) { + write := func(bs []byte) { + if err == nil { + _, e := s.w.Write(bs) + if e != nil { + err = e + } + } + } + for _, c := range p { + switch c { + case '\n': + s.buf = stripTrailingWhitespace(s.buf) + + if s.strikethrough { + write(beginStrikethrough) + write(s.buf) + write(endStrikethrough) + } else { + write(s.buf) + } + write(newline) + + s.buf = s.buf[:0] // reset len for reuse. 
+ s.strikethrough = false + + if err != nil { + return 0, err + } + + case magicStrikethrough: + s.strikethrough = true + + default: + s.buf = append(s.buf, c) + } + } + return len(p), nil +} + +var ( + // Use color red and the strikethrough escape + beginStrikethrough = []byte("\033[9m\033[31m") + endStrikethrough = []byte("\033[0m") + newline = []byte("\n") +) + +var _ io.Writer = &strikethroughWriter{} + +func newTabWriter(out io.Writer) *tabwriter.Writer { + const ( + minWidth = 5 + width = 4 + padding = 3 + padChar = ' ' + flags = tabwriter.RememberWidths + ) + return tabwriter.NewWriter(out, minWidth, width, padding, padChar, flags) +} diff --git a/vendor/github.com/cilium/statedb/table.go b/vendor/github.com/cilium/statedb/table.go index 1402e9360c..1d75f07241 100644 --- a/vendor/github.com/cilium/statedb/table.go +++ b/vendor/github.com/cilium/statedb/table.go @@ -9,11 +9,14 @@ import ( "regexp" "runtime" "slices" + "sort" "strings" "sync" + "sync/atomic" "github.com/cilium/statedb/internal" "github.com/cilium/statedb/part" + "gopkg.in/yaml.v3" "github.com/cilium/statedb/index" ) @@ -45,7 +48,8 @@ func NewTable[Obj any]( fromObject: func(iobj object) index.KeySet { return idx.fromObject(iobj.data.(Obj)) }, - unique: idx.isUnique(), + fromString: idx.fromString, + unique: idx.isUnique(), } } @@ -128,6 +132,15 @@ type genTable[Obj any] struct { primaryAnyIndexer anyIndexer secondaryAnyIndexers map[string]anyIndexer indexPositions map[string]int + lastWriteTxn atomic.Pointer[txn] +} + +func (t *genTable[Obj]) acquired(txn *txn) { + t.lastWriteTxn.Store(txn) +} + +func (t *genTable[Obj]) getAcquiredInfo() string { + return t.lastWriteTxn.Load().acquiredInfo() } func (t *genTable[Obj]) tableEntry() tableEntry { @@ -168,6 +181,16 @@ func (t *genTable[Obj]) indexPos(name string) int { return t.indexPositions[name] } +func (t *genTable[Obj]) getIndexer(name string) *anyIndexer { + if name == "" || t.primaryAnyIndexer.name == name { + return &t.primaryAnyIndexer + } + if indexer, ok := t.secondaryAnyIndexers[name]; ok { + return &indexer + } + return nil +} + func (t *genTable[Obj]) PrimaryIndexer() Indexer[Obj] { return t.primaryIndexer } @@ -184,6 +207,16 @@ func (t *genTable[Obj]) Name() string { return t.table } +func (t *genTable[Obj]) Indexes() []string { + idxs := make([]string, 0, 1+len(t.secondaryAnyIndexers)) + idxs = append(idxs, t.primaryAnyIndexer.name) + for k := range t.secondaryAnyIndexers { + idxs = append(idxs, k) + } + sort.Strings(idxs) + return idxs +} + func (t *genTable[Obj]) ToTable() Table[Obj] { return t } @@ -233,6 +266,11 @@ func (t *genTable[Obj]) NumObjects(txn ReadTxn) int { return table.numObjects() } +func (t *genTable[Obj]) numDeletedObjects(txn ReadTxn) int { + table := txn.getTxn().getTableEntry(t) + return table.numDeletedObjects() +} + func (t *genTable[Obj]) Get(txn ReadTxn, q Query[Obj]) (obj Obj, revision uint64, ok bool) { obj, revision, _, ok = t.GetWatch(txn, q) return @@ -283,7 +321,7 @@ func (t *genTable[Obj]) GetWatch(txn ReadTxn, q Query[Obj]) (obj Obj, revision u } // Check that we have a full match on the key - _, secondary := decodeNonUniqueKey(key) + secondary, _ := decodeNonUniqueKey(key) if len(secondary) == len(q.key) { break } @@ -308,7 +346,10 @@ func (t *genTable[Obj]) LowerBoundWatch(txn ReadTxn, q Query[Obj]) (iter.Seq2[Ob // we watch the whole table for changes. 
watch := indexTxn.RootWatch() iter := indexTxn.LowerBound(q.key) - return partSeq[Obj](iter), watch + if indexTxn.unique { + return partSeq[Obj](iter), watch + } + return nonUniqueLowerBoundSeq[Obj](iter, q.key), watch } func (t *genTable[Obj]) Prefix(txn ReadTxn, q Query[Obj]) iter.Seq2[Obj, Revision] { @@ -319,7 +360,10 @@ func (t *genTable[Obj]) Prefix(txn ReadTxn, q Query[Obj]) iter.Seq2[Obj, Revisio func (t *genTable[Obj]) PrefixWatch(txn ReadTxn, q Query[Obj]) (iter.Seq2[Obj, Revision], <-chan struct{}) { indexTxn := txn.getTxn().mustIndexReadTxn(t, t.indexPos(q.index)) iter, watch := indexTxn.Prefix(q.key) - return partSeq[Obj](iter), watch + if indexTxn.unique { + return partSeq[Obj](iter), watch + } + return nonUniqueSeq[Obj](iter, true, q.key), watch } func (t *genTable[Obj]) All(txn ReadTxn) iter.Seq2[Obj, Revision] { @@ -356,7 +400,7 @@ func (t *genTable[Obj]) ListWatch(txn ReadTxn, q Query[Obj]) (iter.Seq2[Obj, Rev // iteration will continue until key length mismatches, e.g. we hit a // longer key sharing the same prefix. iter, watch := indexTxn.Prefix(q.key) - return nonUniqueSeq[Obj](iter, q.key), watch + return nonUniqueSeq[Obj](iter, false, q.key), watch } func (t *genTable[Obj]) Insert(txn WriteTxn, obj Obj) (oldObj Obj, hadOld bool, err error) { @@ -468,5 +512,18 @@ func (t *genTable[Obj]) sortableMutex() internal.SortableMutex { return t.smu } +func (t *genTable[Obj]) proto() any { + var zero Obj + return zero +} + +func (t *genTable[Obj]) unmarshalYAML(data []byte) (any, error) { + var obj Obj + if err := yaml.Unmarshal(data, &obj); err != nil { + return nil, err + } + return obj, nil +} + var _ Table[bool] = &genTable[bool]{} var _ RWTable[bool] = &genTable[bool]{} diff --git a/vendor/github.com/cilium/statedb/txn.go b/vendor/github.com/cilium/statedb/txn.go index bc4fb48cb3..4a7bc1c767 100644 --- a/vendor/github.com/cilium/statedb/txn.go +++ b/vendor/github.com/cilium/statedb/txn.go @@ -12,6 +12,7 @@ import ( "reflect" "runtime" "slices" + "sync/atomic" "time" "github.com/cilium/statedb/index" @@ -20,12 +21,18 @@ import ( ) type txn struct { - db *DB - handle string - root dbRoot + db *DB + root dbRoot + + handle string + acquiredAt time.Time // the time at which the transaction acquired the locks + duration atomic.Uint64 // the transaction duration after it finished + writeTxn +} + +type writeTxn struct { modifiedTables []*tableEntry // table entries being modified smus internal.SortableMutexes // the (sorted) table locks - acquiredAt time.Time // the time at which the transaction acquired the locks tableNames []string } @@ -46,6 +53,23 @@ func (txn *txn) getTxn() *txn { return txn } +// acquiredInfo returns the information for the "Last WriteTxn" column +// in "db tables" command. The correctness of this relies on the following assumptions: +// - txn.handle and txn.acquiredAt are not modified +// - txn.duration is atomically updated on Commit or Abort +func (txn *txn) acquiredInfo() string { + if txn == nil { + return "" + } + since := internal.PrettySince(txn.acquiredAt) + dur := time.Duration(txn.duration.Load()) + if txn.duration.Load() == 0 { + // Still locked + return fmt.Sprintf("%s (locked for %s)", txn.handle, since) + } + return fmt.Sprintf("%s (%s ago, locked for %s)", txn.handle, since, internal.PrettyDuration(dur)) +} + // txnFinalizer is called when the GC frees *txn. It checks that a WriteTxn // has been Aborted or Committed. This is a safeguard against forgetting to // Abort/Commit which would cause the table to be locked forever. 
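The acquiredInfo() hunk above feeds the new "Last WriteTxn" column of the `db tables` script command. As a rough illustration of the two output shapes it produces, here is a minimal standalone sketch; the fakeTxn type, the "agent" handle, and the plain time.Duration formatting are stand-ins for statedb's internal txn type and PrettySince/PrettyDuration helpers, not code from this change.

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// fakeTxn mirrors just the fields acquiredInfo() reads: a handle, the lock
// acquisition time, and an atomically stored duration that stays 0 while the
// write transaction is still open.
type fakeTxn struct {
	handle     string
	acquiredAt time.Time
	duration   atomic.Uint64 // nanoseconds; 0 == still locked
}

func (t *fakeTxn) acquiredInfo() string {
	if t == nil {
		return ""
	}
	since := time.Since(t.acquiredAt).Truncate(time.Millisecond)
	dur := time.Duration(t.duration.Load())
	if dur == 0 {
		// Still holding the table locks.
		return fmt.Sprintf("%s (locked for %s)", t.handle, since)
	}
	return fmt.Sprintf("%s (%s ago, locked for %s)", t.handle, since, dur)
}

func main() {
	txn := &fakeTxn{handle: "agent", acquiredAt: time.Now().Add(-2 * time.Second)}
	fmt.Println(txn.acquiredInfo()) // e.g. "agent (locked for 2s)"

	txn.duration.Store(uint64(150 * time.Millisecond))
	fmt.Println(txn.acquiredInfo()) // e.g. "agent (2s ago, locked for 150ms)"
}
```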
@@ -336,22 +360,59 @@ func (txn *txn) delete(meta TableMeta, guardRevision Revision, data any) (object return obj, true, nil } +const ( + nonUniqueSeparator = 0x0 + nonUniqueSubstitute = 0xfe + nonUniqueSubstitute2 = 0xfd +) + +// appendEncodePrimary encodes the 'src' (primary key) into 'dst'. +func appendEncodePrimary(dst, src []byte) []byte { + for _, b := range src { + switch b { + case nonUniqueSeparator: + dst = append(dst, nonUniqueSubstitute) + case nonUniqueSubstitute: + dst = append(dst, nonUniqueSubstitute2, 0x00) + case nonUniqueSubstitute2: + dst = append(dst, nonUniqueSubstitute2, 0x01) + default: + dst = append(dst, b) + } + } + return dst +} + // encodeNonUniqueKey constructs the internal key to use with non-unique indexes. -// It concatenates the secondary key with the primary key and the length of the secondary key. -// The length is stored as unsigned 16-bit big endian. -// This allows looking up from the non-unique index with the secondary key by doing a prefix -// search. The length is used to safe-guard against indexers that don't terminate the key -// properly (e.g. if secondary key is "foo", then we don't want "foobar" to match). +// The key is constructed by concatenating the secondary key with the primary key +// along with the secondary key length. The secondary and primary key are separated +// by a 0x0 to ensure ordering is defined by the secondary key. To make sure the +// separator does not appear in the primary key it is encoded using this schema: +// +// 0x0 => 0xfe, 0xfe => 0xfd00, 0xfd => 0xfd01 +// +// The schema tries to avoid expansion for encoded small integers, e.g. 0x0000 becomes 0xfefe. +// The length at the end is encoded as unsigned 16-bit big endian. +// +// This schema allows looking up from the non-unique index with the secondary key by +// doing a prefix search. The length is used to safe-guard against indexers that don't +// terminate the key properly (e.g. if secondary key is "foo", then we don't want +// "foobar" to match). func encodeNonUniqueKey(primary, secondary index.Key) []byte { - key := make([]byte, 0, len(secondary)+len(primary)+2) + key := make([]byte, 0, + len(secondary)+1 /* separator */ + + len(primary)+ + 2 /* space for a few substitutions */ + + 2 /* length */) key = append(key, secondary...) - key = append(key, primary...) + key = append(key, nonUniqueSeparator) + key = appendEncodePrimary(key, primary) // KeySet limits size of key to 16 bits. return binary.BigEndian.AppendUint16(key, uint16(len(secondary))) } -func decodeNonUniqueKey(key []byte) (primary []byte, secondary []byte) { - // Multi-index key is [<secondary>, <primary>, <secondary length>] +func decodeNonUniqueKey(key []byte) (secondary []byte, encPrimary []byte) { + // Non-unique key is [<secondary>, '\xfe', <encoded primary>, <secondary length>] if len(key) < 2 { return nil, nil } @@ -359,13 +420,13 @@ func decodeNonUniqueKey(key []byte) (primary []byte, secondary []byte) { if len(key) < secondaryLength { return nil, nil } - return key[secondaryLength : len(key)-2], key[:secondaryLength] + return key[:secondaryLength], key[secondaryLength+1 : len(key)-2] } func (txn *txn) Abort() { runtime.SetFinalizer(txn, nil) - // If writeTxns is nil, this transaction has already been committed or aborted, and + // If modifiedTables is nil, this transaction has already been committed or aborted, and // thus there is nothing to do.
We allow this without failure to allow for defer // pattern: // @@ -384,13 +445,15 @@ func (txn *txn) Abort() { return } + txn.duration.Store(uint64(time.Since(txn.acquiredAt))) + txn.smus.Unlock() txn.db.metrics.WriteTxnDuration( txn.handle, txn.tableNames, time.Since(txn.acquiredAt)) - *txn = zeroTxn + txn.writeTxn = writeTxn{} } // Commit the transaction. Returns a ReadTxn that is the snapshot of the database at the @@ -422,6 +485,8 @@ func (txn *txn) Commit() ReadTxn { return nil } + txn.duration.Store(uint64(time.Since(txn.acquiredAt))) + db := txn.db // Commit each individual changed index to each table. @@ -477,6 +542,7 @@ func (txn *txn) Commit() ReadTxn { // Commit the transaction to build the new root tree and then // atomically store it. + txn.root = root db.root.Store(&root) db.mu.Unlock() @@ -499,11 +565,8 @@ func (txn *txn) Commit() ReadTxn { txn.tableNames, time.Since(txn.acquiredAt)) - // Zero out the transaction to make it inert and - // convert it into a ReadTxn. - *txn = zeroTxn - txn.db = db - txn.root = root + // Convert into a ReadTxn + txn.writeTxn = writeTxn{} return txn } diff --git a/vendor/github.com/cilium/statedb/types.go b/vendor/github.com/cilium/statedb/types.go index 5f817f8e71..5492e64f1d 100644 --- a/vendor/github.com/cilium/statedb/types.go +++ b/vendor/github.com/cilium/statedb/types.go @@ -4,6 +4,7 @@ package statedb import ( + "errors" "io" "iter" @@ -28,22 +29,6 @@ type Table[Obj any] interface { // Useful for generic utilities that need access to the primary key. PrimaryIndexer() Indexer[Obj] - // NumObjects returns the number of objects stored in the table. - NumObjects(ReadTxn) int - - // Initialized returns true if in this ReadTxn (snapshot of the database) - // the registered initializers have all been completed. The returned - // watch channel will be closed when the table becomes initialized. - Initialized(ReadTxn) (bool, <-chan struct{}) - - // PendingInitializers returns the set of pending initializers that - // have not yet completed. - PendingInitializers(ReadTxn) []string - - // Revision of the table. Constant for a read transaction, but - // increments in a write transaction on each Insert and Delete. - Revision(ReadTxn) Revision - // All returns a sequence of all objects in the table. All(ReadTxn) iter.Seq2[Obj, Revision] @@ -215,24 +200,48 @@ type RWTable[Obj any] interface { // TableMeta provides information about the table that is independent of // the object type (the 'Obj' constraint). type TableMeta interface { - Name() TableName // The name of the table + // Name returns the name of the table + Name() TableName + + // Indexes returns the names of the indexes + Indexes() []string + + // NumObjects returns the number of objects stored in the table. + NumObjects(ReadTxn) int + // Initialized returns true if in this ReadTxn (snapshot of the database) + // the registered initializers have all been completed. The returned + // watch channel will be closed when the table becomes initialized. + Initialized(ReadTxn) (bool, <-chan struct{}) + + // PendingInitializers returns the set of pending initializers that + // have not yet completed. + PendingInitializers(ReadTxn) []string + + // Revision of the table. Constant for a read transaction, but + // increments in a write transaction on each Insert and Delete. + Revision(ReadTxn) Revision + + // Internal unexported methods used only internally. 
+ tableInternal +} + +type tableInternal interface { tableEntry() tableEntry tablePos() int setTablePos(int) indexPos(string) int - tableKey() []byte // The radix key for the table in the root tree + tableKey() []byte // The radix key for the table in the root tree + getIndexer(name string) *anyIndexer primary() anyIndexer // The untyped primary indexer for the table secondary() map[string]anyIndexer // Secondary indexers (if any) sortableMutex() internal.SortableMutex // The sortable mutex for locking the table for writing anyChanges(txn WriteTxn) (anyChangeIterator, error) -} - -// Iterator for iterating objects returned from queries. -type Iterator[Obj any] interface { - // Next returns the next object and its revision if ok is true, otherwise - // zero values to mean that the iteration has finished. - Next() (obj Obj, rev Revision, ok bool) + proto() any // Returns the zero value of 'Obj', e.g. the prototype + unmarshalYAML(data []byte) (any, error) // Unmarshal the data into 'Obj' + numDeletedObjects(txn ReadTxn) int // Number of objects in graveyard + acquired(*txn) + getAcquiredInfo() string } type ReadTxn interface { @@ -278,10 +287,26 @@ func ByRevision[Obj any](rev uint64) Query[Obj] { // Index implements the indexing of objects (FromObjects) and querying of objects from the index (FromKey) type Index[Obj any, Key any] struct { - Name string + // Name of the index + Name string + + // FromObject extracts key(s) from the object. The key set + // can contain 0, 1 or more keys. FromObject func(obj Obj) index.KeySet - FromKey func(key Key) index.Key - Unique bool + + // FromKey converts the index key into a raw key. + // With this we can perform Query() against this index with + // the [Key] type. + FromKey func(key Key) index.Key + + // FromString is an optional conversion from string to a raw key. + // If implemented allows script commands to query with this index. + FromString func(key string) (index.Key, error) + + // Unique marks the index as unique. Primary index must always be + // unique. A secondary index may be non-unique in which case a single + // key may map to multiple objects. + Unique bool } var _ Indexer[struct{}] = &Index[struct{}, bool]{} @@ -299,6 +324,17 @@ func (i Index[Obj, Key]) fromObject(obj Obj) index.KeySet { return i.FromObject(obj) } +var errFromStringNil = errors.New("FromString not defined") + +//nolint:unused +func (i Index[Obj, Key]) fromString(s string) (index.Key, error) { + if i.FromString == nil { + return index.Key{}, errFromStringNil + } + k, err := i.FromString(s) + return k, err +} + //nolint:unused func (i Index[Obj, Key]) isUnique() bool { return i.Unique @@ -329,6 +365,7 @@ type Indexer[Obj any] interface { indexName() string isUnique() bool fromObject(Obj) index.KeySet + fromString(string) (index.Key, error) ObjectToKey(Obj) index.Key QueryFromObject(Obj) Query[Obj] @@ -379,6 +416,9 @@ type anyIndexer struct { // object with. fromObject func(object) index.KeySet + // fromString converts string into a key. Optional. + fromString func(string) (index.Key, error) + // unique if true will index the object solely on the // values returned by fromObject. If false the primary // key of the object will be appended to the key. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md index be82827cac..d135bfe023 100644 --- a/vendor/github.com/fatih/color/README.md +++ b/vendor/github.com/fatih/color/README.md @@ -9,7 +9,7 @@ suits you. 
## Install -```bash +``` go get github.com/fatih/color ``` @@ -30,6 +30,18 @@ color.Magenta("And many others ..") ``` +### RGB colors + +If your terminal supports 24-bit colors, you can use RGB color codes. + +```go +color.RGB(255, 128, 0).Println("foreground orange") +color.RGB(230, 42, 42).Println("foreground red") + +color.BgRGB(255, 128, 0).Println("background orange") +color.BgRGB(230, 42, 42).Println("background red") +``` + ### Mix and reuse colors ```go @@ -49,6 +61,11 @@ boldRed.Println("This will print text in bold red.") whiteBackground := red.Add(color.BgWhite) whiteBackground.Println("Red text with white background.") + +// Mix with RGB color codes +color.RGB(255, 128, 0).AddBgRGB(0, 0, 0).Println("orange with black background") + +color.BgRGB(255, 128, 0).AddRGB(255, 255, 255).Println("orange background with white foreground") ``` ### Use your own output (io.Writer) @@ -161,10 +178,6 @@ c.Println("This prints again cyan...") To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams. -## Todo - -* Save/Return previous values -* Evaluate fmt.Formatter interface ## Credits diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go index 81094e87c5..ee39b408e9 100644 --- a/vendor/github.com/fatih/color/color.go +++ b/vendor/github.com/fatih/color/color.go @@ -98,6 +98,9 @@ const ( FgMagenta FgCyan FgWhite + + // used internally for 256 and 24-bit coloring + foreground ) // Foreground Hi-Intensity text colors @@ -122,6 +125,9 @@ const ( BgMagenta BgCyan BgWhite + + // used internally for 256 and 24-bit coloring + background ) // Background Hi-Intensity text colors @@ -150,6 +156,30 @@ func New(value ...Attribute) *Color { return c } +// RGB returns a new foreground color in 24-bit RGB. +func RGB(r, g, b int) *Color { + return New(foreground, 2, Attribute(r), Attribute(g), Attribute(b)) +} + +// BgRGB returns a new background color in 24-bit RGB. +func BgRGB(r, g, b int) *Color { + return New(background, 2, Attribute(r), Attribute(g), Attribute(b)) +} + +// AddRGB is used to chain foreground RGB SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: .Add(34, 0, 12).Add(255, 128, 0). +func (c *Color) AddRGB(r, g, b int) *Color { + c.params = append(c.params, foreground, 2, Attribute(r), Attribute(g), Attribute(b)) + return c +} + +// AddRGB is used to chain background RGB SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: .Add(34, 0, 12).Add(255, 128, 0). +func (c *Color) AddBgRGB(r, g, b int) *Color { + c.params = append(c.params, background, 2, Attribute(r), Attribute(g), Attribute(b)) + return c +} + // Set sets the given parameters immediately. It will change the color of // output with the given SGR parameters until color.Unset() is called. 
func Set(p ...Attribute) *Color { @@ -401,7 +431,7 @@ func (c *Color) format() string { func (c *Color) unformat() string { //return fmt.Sprintf("%s[%dm", escape, Reset) - //for each element in sequence let's use the speficic reset escape, ou the generic one if not found + //for each element in sequence let's use the specific reset escape, or the generic one if not found format := make([]string, len(c.params)) for i, v := range c.params { format[i] = strconv.Itoa(int(Reset)) diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go index 481c47b46e..abb2c90018 100644 --- a/vendor/github.com/prometheus/common/model/labelset_string.go +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -11,8 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build go1.21 - package model import ( diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go deleted file mode 100644 index c4212685e7..0000000000 --- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.21 - -package model - -import ( - "fmt" - "sort" - "strings" -) - -// String was optimized using functions not available for go 1.20 -// or lower. We keep the old implementation for compatibility with client_golang. -// Once client golang drops support for go 1.20 (scheduled for August 2024), this -// file can be removed. -func (l LabelSet) String() string { - labelNames := make([]string, 0, len(l)) - for name := range l { - labelNames = append(labelNames, string(name)) - } - sort.Strings(labelNames) - lstrs := make([]string, 0, len(l)) - for _, name := range labelNames { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)])) - } - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go index 218ab23796..9b49baf976 100644 --- a/vendor/github.com/vishvananda/netlink/addr_linux.go +++ b/vendor/github.com/vishvananda/netlink/addr_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "net" "strings" @@ -169,6 +170,9 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error // AddrList gets a list of IP addresses in the system. // Equivalent to: `ip addr show`. // The list can be filtered by link and ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func AddrList(link Link, family int) ([]Addr, error) { return pkgHandle.AddrList(link, family) } @@ -176,14 +180,17 @@ func AddrList(link Link, family int) ([]Addr, error) { // AddrList gets a list of IP addresses in the system. // Equivalent to: `ip addr show`. 
// The list can be filtered by link and ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) AddrList(link Link, family int) ([]Addr, error) { req := h.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP) msg := nl.NewIfAddrmsg(family) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } indexFilter := 0 @@ -212,7 +219,7 @@ func (h *Handle) AddrList(link Link, family int) ([]Addr, error) { res = append(res, addr) } - return res, nil + return res, executeErr } func parseAddr(m []byte) (addr Addr, family int, err error) { diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go index 6c340b0ce9..fa5766b801 100644 --- a/vendor/github.com/vishvananda/netlink/bridge_linux.go +++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "github.com/vishvananda/netlink/nl" @@ -9,21 +10,27 @@ import ( // BridgeVlanList gets a map of device id to bridge vlan infos. // Equivalent to: `bridge vlan show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { return pkgHandle.BridgeVlanList() } // BridgeVlanList gets a map of device id to bridge vlan infos. // Equivalent to: `bridge vlan show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) msg := nl.NewIfInfomsg(unix.AF_BRIDGE) req.AddData(msg) req.AddData(nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(uint32(nl.RTEXT_FILTER_BRVLAN)))) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } ret := make(map[int32][]*nl.BridgeVlanInfo) for _, m := range msgs { @@ -51,7 +58,7 @@ func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { } } } - return ret, nil + return ret, executeErr } // BridgeVlanAdd adds a new vlan filter entry diff --git a/vendor/github.com/vishvananda/netlink/chain_linux.go b/vendor/github.com/vishvananda/netlink/chain_linux.go index d9f441613c..5008e7101f 100644 --- a/vendor/github.com/vishvananda/netlink/chain_linux.go +++ b/vendor/github.com/vishvananda/netlink/chain_linux.go @@ -1,6 +1,8 @@ package netlink import ( + "errors" + "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" ) @@ -56,6 +58,9 @@ func (h *Handle) chainModify(cmd, flags int, link Link, chain Chain) error { // ChainList gets a list of chains in the system. // Equivalent to: `tc chain list`. // The list can be filtered by link. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func ChainList(link Link, parent uint32) ([]Chain, error) { return pkgHandle.ChainList(link, parent) } @@ -63,6 +68,9 @@ func ChainList(link Link, parent uint32) ([]Chain, error) { // ChainList gets a list of chains in the system. // Equivalent to: `tc chain list`. 
// The list can be filtered by link. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) ChainList(link Link, parent uint32) ([]Chain, error) { req := h.newNetlinkRequest(unix.RTM_GETCHAIN, unix.NLM_F_DUMP) index := int32(0) @@ -78,9 +86,9 @@ func (h *Handle) ChainList(link Link, parent uint32) ([]Chain, error) { } req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWCHAIN) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWCHAIN) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []Chain @@ -108,5 +116,5 @@ func (h *Handle) ChainList(link Link, parent uint32) ([]Chain, error) { res = append(res, chain) } - return res, nil + return res, executeErr } diff --git a/vendor/github.com/vishvananda/netlink/class_linux.go b/vendor/github.com/vishvananda/netlink/class_linux.go index a82eb09de2..08fb16c2bc 100644 --- a/vendor/github.com/vishvananda/netlink/class_linux.go +++ b/vendor/github.com/vishvananda/netlink/class_linux.go @@ -201,14 +201,20 @@ func classPayload(req *nl.NetlinkRequest, class Class) error { // ClassList gets a list of classes in the system. // Equivalent to: `tc class show`. +// // Generally returns nothing if link and parent are not specified. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func ClassList(link Link, parent uint32) ([]Class, error) { return pkgHandle.ClassList(link, parent) } // ClassList gets a list of classes in the system. // Equivalent to: `tc class show`. +// // Generally returns nothing if link and parent are not specified. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { req := h.newNetlinkRequest(unix.RTM_GETTCLASS, unix.NLM_F_DUMP) msg := &nl.TcMsg{ @@ -222,9 +228,9 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { } req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTCLASS) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTCLASS) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []Class @@ -295,7 +301,7 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { res = append(res, class) } - return res, nil + return res, executeErr } func parseHtbClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, error) { diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go index ba022453b3..69c5eca034 100644 --- a/vendor/github.com/vishvananda/netlink/conntrack_linux.go +++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "net" + "strings" "time" "github.com/vishvananda/netlink/nl" @@ -44,6 +45,9 @@ type InetFamily uint8 // ConntrackTableList returns the flow list of a table of a specific family // conntrack -L [table] [options] List conntrack or expectation table +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) { return pkgHandle.ConntrackTableList(table, family) } @@ -70,7 +74,7 @@ func ConntrackUpdate(table ConntrackTableType, family InetFamily, flow *Conntrac // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter // conntrack -D [table] parameters Delete conntrack or expectation // -// Deprecated: use [ConntrackDeleteFilter] instead. +// Deprecated: use [ConntrackDeleteFilters] instead. func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) { return pkgHandle.ConntrackDeleteFilters(table, family, filter) } @@ -83,10 +87,13 @@ func ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters // ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed // conntrack -L [table] [options] List conntrack or expectation table +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) { - res, err := h.dumpConntrackTable(table, family) - if err != nil { - return nil, err + res, executeErr := h.dumpConntrackTable(table, family) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } // Deserialize all the flows @@ -95,7 +102,7 @@ func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) result = append(result, parseRawData(dataRaw)) } - return result, nil + return result, executeErr } // ConntrackTableFlush flushes all the flows of a specified table using the netlink handle passed @@ -158,6 +165,7 @@ func (h *Handle) ConntrackDeleteFilters(table ConntrackTableType, family InetFam } var matched uint + var errMsgs []string for _, dataRaw := range res { flow := parseRawData(dataRaw) for _, filter := range filters { @@ -165,14 +173,18 @@ func (h *Handle) ConntrackDeleteFilters(table ConntrackTableType, family InetFam req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK) // skip the first 4 byte that are the netfilter header, the newConntrackRequest is adding it already req2.AddRawData(dataRaw[4:]) - req2.Execute(unix.NETLINK_NETFILTER, 0) - matched++ - // flow is already deleted, no need to match on other filters and continue to the next flow. - break + if _, err = req2.Execute(unix.NETLINK_NETFILTER, 0); err == nil { + matched++ + // flow is already deleted, no need to match on other filters and continue to the next flow. + break + } + errMsgs = append(errMsgs, fmt.Sprintf("failed to delete conntrack flow '%s': %s", flow.String(), err.Error())) } } } - + if len(errMsgs) > 0 { + return matched, fmt.Errorf(strings.Join(errMsgs, "; ")) + } return matched, nil } diff --git a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go index 0bfdf422d1..0049048dc3 100644 --- a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go @@ -33,7 +33,7 @@ func ConntrackTableFlush(table ConntrackTableType) error { // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter // conntrack -D [table] parameters Delete conntrack or expectation // -// Deprecated: use [ConntrackDeleteFilter] instead. +// Deprecated: use [ConntrackDeleteFilters] instead. 
func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) { return 0, ErrNotImplemented } diff --git a/vendor/github.com/vishvananda/netlink/devlink_linux.go b/vendor/github.com/vishvananda/netlink/devlink_linux.go index d98801dbbe..45d8ee4b6b 100644 --- a/vendor/github.com/vishvananda/netlink/devlink_linux.go +++ b/vendor/github.com/vishvananda/netlink/devlink_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "net" "strings" @@ -466,6 +467,8 @@ func (h *Handle) getEswitchAttrs(family *GenlFamily, dev *DevlinkDevice) { // DevLinkGetDeviceList provides a pointer to devlink devices and nil error, // otherwise returns an error code. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) DevLinkGetDeviceList() ([]*DevlinkDevice, error) { f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME) if err != nil { @@ -478,9 +481,9 @@ func (h *Handle) DevLinkGetDeviceList() ([]*DevlinkDevice, error) { req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_REQUEST|unix.NLM_F_ACK|unix.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_GENERIC, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } devices, err := parseDevLinkDeviceList(msgs) if err != nil { @@ -489,11 +492,14 @@ func (h *Handle) DevLinkGetDeviceList() ([]*DevlinkDevice, error) { for _, d := range devices { h.getEswitchAttrs(f, d) } - return devices, nil + return devices, executeErr } // DevLinkGetDeviceList provides a pointer to devlink devices and nil error, // otherwise returns an error code. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func DevLinkGetDeviceList() ([]*DevlinkDevice, error) { return pkgHandle.DevLinkGetDeviceList() } @@ -646,6 +652,8 @@ func parseDevLinkAllPortList(msgs [][]byte) ([]*DevlinkPort, error) { // DevLinkGetPortList provides a pointer to devlink ports and nil error, // otherwise returns an error code. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) DevLinkGetAllPortList() ([]*DevlinkPort, error) { f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME) if err != nil { @@ -658,19 +666,21 @@ func (h *Handle) DevLinkGetAllPortList() ([]*DevlinkPort, error) { req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_REQUEST|unix.NLM_F_ACK|unix.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_GENERIC, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } ports, err := parseDevLinkAllPortList(msgs) if err != nil { return nil, err } - return ports, nil + return ports, executeErr } // DevLinkGetPortList provides a pointer to devlink ports and nil error, // otherwise returns an error code. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func DevLinkGetAllPortList() ([]*DevlinkPort, error) { return pkgHandle.DevLinkGetAllPortList() } @@ -738,15 +748,18 @@ func (h *Handle) DevlinkGetDeviceResources(bus string, device string) (*DevlinkR // DevlinkGetDeviceParams returns parameters for devlink device // Equivalent to: `devlink dev param show /` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkParam, error) { _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PARAM_GET, bus, device) if err != nil { return nil, err } req.Flags |= unix.NLM_F_DUMP - respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return nil, err + respmsg, executeErr := req.Execute(unix.NETLINK_GENERIC, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var params []*DevlinkParam for _, m := range respmsg { @@ -761,11 +774,14 @@ func (h *Handle) DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkPa params = append(params, p) } - return params, nil + return params, executeErr } // DevlinkGetDeviceParams returns parameters for devlink device // Equivalent to: `devlink dev param show /` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkParam, error) { return pkgHandle.DevlinkGetDeviceParams(bus, device) } diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go index 87cd18f8e4..19306612ee 100644 --- a/vendor/github.com/vishvananda/netlink/filter_linux.go +++ b/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -405,14 +405,20 @@ func (h *Handle) filterModify(filter Filter, proto, flags int) error { // FilterList gets a list of filters in the system. // Equivalent to: `tc filter show`. +// // Generally returns nothing if link and parent are not specified. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func FilterList(link Link, parent uint32) ([]Filter, error) { return pkgHandle.FilterList(link, parent) } // FilterList gets a list of filters in the system. // Equivalent to: `tc filter show`. +// // Generally returns nothing if link and parent are not specified. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { req := h.newNetlinkRequest(unix.RTM_GETTFILTER, unix.NLM_F_DUMP) msg := &nl.TcMsg{ @@ -426,9 +432,9 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { } req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTFILTER) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTFILTER) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []Filter @@ -516,7 +522,7 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { } } - return res, nil + return res, executeErr } func toTcGen(attrs *ActionAttrs, tcgen *nl.TcGen) { @@ -920,9 +926,11 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { actionnStatistic = (*ActionStatistic)(s) } } - action.Attrs().Statistics = actionnStatistic - action.Attrs().Timestamp = actionTimestamp - actions = append(actions, action) + if action != nil { + action.Attrs().Statistics = actionnStatistic + action.Attrs().Timestamp = actionTimestamp + actions = append(actions, action) + } } return actions, nil } diff --git a/vendor/github.com/vishvananda/netlink/fou.go b/vendor/github.com/vishvananda/netlink/fou.go index 71e73c37a0..ea9f6cf673 100644 --- a/vendor/github.com/vishvananda/netlink/fou.go +++ b/vendor/github.com/vishvananda/netlink/fou.go @@ -1,16 +1,7 @@ package netlink import ( - "errors" -) - -var ( - // ErrAttrHeaderTruncated is returned when a netlink attribute's header is - // truncated. - ErrAttrHeaderTruncated = errors.New("attribute header truncated") - // ErrAttrBodyTruncated is returned when a netlink attribute's body is - // truncated. - ErrAttrBodyTruncated = errors.New("attribute body truncated") + "net" ) type Fou struct { @@ -18,4 +9,8 @@ type Fou struct { Port int Protocol int EncapType int + Local net.IP + Peer net.IP + PeerPort int + IfIndex int } diff --git a/vendor/github.com/vishvananda/netlink/fou_linux.go b/vendor/github.com/vishvananda/netlink/fou_linux.go index ed55b2b790..7645a5a5c2 100644 --- a/vendor/github.com/vishvananda/netlink/fou_linux.go +++ b/vendor/github.com/vishvananda/netlink/fou_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package netlink @@ -5,6 +6,8 @@ package netlink import ( "encoding/binary" "errors" + "log" + "net" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" @@ -29,6 +32,12 @@ const ( FOU_ATTR_IPPROTO FOU_ATTR_TYPE FOU_ATTR_REMCSUM_NOPARTIAL + FOU_ATTR_LOCAL_V4 + FOU_ATTR_LOCAL_V6 + FOU_ATTR_PEER_V4 + FOU_ATTR_PEER_V6 + FOU_ATTR_PEER_PORT + FOU_ATTR_IFINDEX FOU_ATTR_MAX = FOU_ATTR_REMCSUM_NOPARTIAL ) @@ -128,10 +137,14 @@ func (h *Handle) FouDel(f Fou) error { return nil } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func FouList(fam int) ([]Fou, error) { return pkgHandle.FouList(fam) } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) FouList(fam int) ([]Fou, error) { fam_id, err := FouFamilyId() if err != nil { @@ -150,9 +163,9 @@ func (h *Handle) FouList(fam int) ([]Fou, error) { req.AddRawData(raw) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_GENERIC, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } fous := make([]Fou, 0, len(msgs)) @@ -165,45 +178,32 @@ func (h *Handle) FouList(fam int) ([]Fou, error) { fous = append(fous, f) } - return fous, nil + return fous, executeErr } func deserializeFouMsg(msg []byte) (Fou, error) { - // we'll skip to byte 4 to first attribute - msg = msg[3:] - var shift int fou := Fou{} - for { - // attribute header is at least 16 bits - if len(msg) < 4 { - return fou, ErrAttrHeaderTruncated - } - - lgt := int(binary.BigEndian.Uint16(msg[0:2])) - if len(msg) < lgt+4 { - return fou, ErrAttrBodyTruncated - } - attr := binary.BigEndian.Uint16(msg[2:4]) - - shift = lgt + 3 - switch attr { + for attr := range nl.ParseAttributes(msg[4:]) { + switch attr.Type { case FOU_ATTR_AF: - fou.Family = int(msg[5]) + fou.Family = int(attr.Value[0]) case FOU_ATTR_PORT: - fou.Port = int(binary.BigEndian.Uint16(msg[5:7])) - // port is 2 bytes - shift = lgt + 2 + fou.Port = int(networkOrder.Uint16(attr.Value)) case FOU_ATTR_IPPROTO: - fou.Protocol = int(msg[5]) + fou.Protocol = int(attr.Value[0]) case FOU_ATTR_TYPE: - fou.EncapType = int(msg[5]) - } - - msg = msg[shift:] - - if len(msg) < 4 { - break + fou.EncapType = int(attr.Value[0]) + case FOU_ATTR_LOCAL_V4, FOU_ATTR_LOCAL_V6: + fou.Local = net.IP(attr.Value) + case FOU_ATTR_PEER_V4, FOU_ATTR_PEER_V6: + fou.Peer = net.IP(attr.Value) + case FOU_ATTR_PEER_PORT: + fou.PeerPort = int(networkOrder.Uint16(attr.Value)) + case FOU_ATTR_IFINDEX: + fou.IfIndex = int(native.Uint16(attr.Value)) + default: + log.Printf("unknown fou attribute from kernel: %+v %v", attr, attr.Type&nl.NLA_TYPE_MASK) } } diff --git a/vendor/github.com/vishvananda/netlink/fou_unspecified.go b/vendor/github.com/vishvananda/netlink/fou_unspecified.go index 3a8365bfe6..7e550151ad 100644 --- a/vendor/github.com/vishvananda/netlink/fou_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/fou_unspecified.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package netlink diff --git a/vendor/github.com/vishvananda/netlink/genetlink_linux.go b/vendor/github.com/vishvananda/netlink/genetlink_linux.go index 772e5834a2..7bdaad97b4 100644 --- a/vendor/github.com/vishvananda/netlink/genetlink_linux.go +++ b/vendor/github.com/vishvananda/netlink/genetlink_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "syscall" @@ -126,6 +127,8 @@ func parseFamilies(msgs [][]byte) ([]*GenlFamily, error) { return families, nil } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete.
func (h *Handle) GenlFamilyList() ([]*GenlFamily, error) { msg := &nl.Genlmsg{ Command: nl.GENL_CTRL_CMD_GETFAMILY, @@ -133,13 +136,19 @@ func (h *Handle) GenlFamilyList() ([]*GenlFamily, error) { } req := h.newNetlinkRequest(nl.GENL_ID_CTRL, unix.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + msgs, executeErr := req.Execute(unix.NETLINK_GENERIC, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr + } + families, err := parseFamilies(msgs) if err != nil { return nil, err } - return parseFamilies(msgs) + return families, executeErr } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func GenlFamilyList() ([]*GenlFamily, error) { return pkgHandle.GenlFamilyList() } diff --git a/vendor/github.com/vishvananda/netlink/gtp_linux.go b/vendor/github.com/vishvananda/netlink/gtp_linux.go index f5e160ba5c..377dcae5c0 100644 --- a/vendor/github.com/vishvananda/netlink/gtp_linux.go +++ b/vendor/github.com/vishvananda/netlink/gtp_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "net" "strings" @@ -74,6 +75,8 @@ func parsePDP(msgs [][]byte) ([]*PDP, error) { return pdps, nil } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) GTPPDPList() ([]*PDP, error) { f, err := h.GenlFamilyGet(nl.GENL_GTP_NAME) if err != nil { @@ -85,13 +88,19 @@ func (h *Handle) GTPPDPList() ([]*PDP, error) { } req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + msgs, executeErr := req.Execute(unix.NETLINK_GENERIC, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr + } + pdps, err := parsePDP(msgs) if err != nil { return nil, err } - return parsePDP(msgs) + return pdps, executeErr } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete.
func GTPPDPList() ([]*PDP, error) { return pkgHandle.GTPPDPList() } diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go index f820cdb678..e09a6cfe54 100644 --- a/vendor/github.com/vishvananda/netlink/link.go +++ b/vendor/github.com/vishvananda/netlink/link.go @@ -377,6 +377,13 @@ const ( NETKIT_POLICY_BLACKHOLE NetkitPolicy = 2 ) +type NetkitScrub int + +const ( + NETKIT_SCRUB_NONE NetkitScrub = 0 + NETKIT_SCRUB_DEFAULT NetkitScrub = 1 +) + func (n *Netkit) IsPrimary() bool { return n.isPrimary } @@ -391,6 +398,9 @@ type Netkit struct { Mode NetkitMode Policy NetkitPolicy PeerPolicy NetkitPolicy + Scrub NetkitScrub + PeerScrub NetkitScrub + supportsScrub bool isPrimary bool peerLinkAttrs LinkAttrs } @@ -403,6 +413,10 @@ func (n *Netkit) Type() string { return "netkit" } +func (n *Netkit) SupportsScrub() bool { + return n.supportsScrub +} + // Veth devices must specify PeerName on create type Veth struct { LinkAttrs @@ -761,19 +775,19 @@ const ( ) var bondXmitHashPolicyToString = map[BondXmitHashPolicy]string{ - BOND_XMIT_HASH_POLICY_LAYER2: "layer2", - BOND_XMIT_HASH_POLICY_LAYER3_4: "layer3+4", - BOND_XMIT_HASH_POLICY_LAYER2_3: "layer2+3", - BOND_XMIT_HASH_POLICY_ENCAP2_3: "encap2+3", - BOND_XMIT_HASH_POLICY_ENCAP3_4: "encap3+4", + BOND_XMIT_HASH_POLICY_LAYER2: "layer2", + BOND_XMIT_HASH_POLICY_LAYER3_4: "layer3+4", + BOND_XMIT_HASH_POLICY_LAYER2_3: "layer2+3", + BOND_XMIT_HASH_POLICY_ENCAP2_3: "encap2+3", + BOND_XMIT_HASH_POLICY_ENCAP3_4: "encap3+4", BOND_XMIT_HASH_POLICY_VLAN_SRCMAC: "vlan+srcmac", } var StringToBondXmitHashPolicyMap = map[string]BondXmitHashPolicy{ - "layer2": BOND_XMIT_HASH_POLICY_LAYER2, - "layer3+4": BOND_XMIT_HASH_POLICY_LAYER3_4, - "layer2+3": BOND_XMIT_HASH_POLICY_LAYER2_3, - "encap2+3": BOND_XMIT_HASH_POLICY_ENCAP2_3, - "encap3+4": BOND_XMIT_HASH_POLICY_ENCAP3_4, + "layer2": BOND_XMIT_HASH_POLICY_LAYER2, + "layer3+4": BOND_XMIT_HASH_POLICY_LAYER3_4, + "layer2+3": BOND_XMIT_HASH_POLICY_LAYER2_3, + "encap2+3": BOND_XMIT_HASH_POLICY_ENCAP2_3, + "encap3+4": BOND_XMIT_HASH_POLICY_ENCAP3_4, "vlan+srcmac": BOND_XMIT_HASH_POLICY_VLAN_SRCMAC, } diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go index d713612a90..52491c5804 100644 --- a/vendor/github.com/vishvananda/netlink/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/link_linux.go @@ -3,6 +3,7 @@ package netlink import ( "bytes" "encoding/binary" + "errors" "fmt" "io/ioutil" "net" @@ -1807,20 +1808,20 @@ func (h *Handle) LinkDel(link Link) error { } func (h *Handle) linkByNameDump(name string) (Link, error) { - links, err := h.LinkList() - if err != nil { - return nil, err + links, executeErr := h.LinkList() + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } for _, link := range links { if link.Attrs().Name == name { - return link, nil + return link, executeErr } // support finding interfaces also via altnames for _, altName := range link.Attrs().AltNames { if altName == name { - return link, nil + return link, executeErr } } } @@ -1828,25 +1829,33 @@ func (h *Handle) linkByNameDump(name string) (Link, error) { } func (h *Handle) linkByAliasDump(alias string) (Link, error) { - links, err := h.LinkList() - if err != nil { - return nil, err + links, executeErr := h.LinkList() + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } for _, link := range links { if link.Attrs().Alias == alias { - return 
link, nil + return link, executeErr } } return nil, LinkNotFoundError{fmt.Errorf("Link alias %s not found", alias)} } // LinkByName finds a link by name and returns a pointer to the object. +// +// If the kernel doesn't support IFLA_IFNAME, this method will fall back to +// filtering a dump of all link names. In this case, if the returned error is +// [ErrDumpInterrupted] the result may be missing or outdated. func LinkByName(name string) (Link, error) { return pkgHandle.LinkByName(name) } // LinkByName finds a link by name and returns a pointer to the object. +// +// If the kernel doesn't support IFLA_IFNAME, this method will fall back to +// filtering a dump of all link names. In this case, if the returned error is +// [ErrDumpInterrupted] the result may be missing or outdated. func (h *Handle) LinkByName(name string) (Link, error) { if h.lookupByDump { return h.linkByNameDump(name) @@ -1879,12 +1888,20 @@ func (h *Handle) LinkByName(name string) (Link, error) { // LinkByAlias finds a link by its alias and returns a pointer to the object. // If there are multiple links with the alias it returns the first one +// +// If the kernel doesn't support IFLA_IFALIAS, this method will fall back to +// filtering a dump of all link names. In this case, if the returned error is +// [ErrDumpInterrupted] the result may be missing or outdated. func LinkByAlias(alias string) (Link, error) { return pkgHandle.LinkByAlias(alias) } // LinkByAlias finds a link by its alias and returns a pointer to the object. // If there are multiple links with the alias it returns the first one +// +// If the kernel doesn't support IFLA_IFALIAS, this method will fall back to +// filtering a dump of all link names. In this case, if the returned error is +// [ErrDumpInterrupted] the result may be missing or outdated. func (h *Handle) LinkByAlias(alias string) (Link, error) { if h.lookupByDump { return h.linkByAliasDump(alias) @@ -2321,6 +2338,9 @@ func LinkList() ([]Link, error) { // LinkList gets a list of link devices. // Equivalent to: `ip link show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) LinkList() ([]Link, error) { // NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need // to get the message ourselves to parse link type. @@ -2331,9 +2351,9 @@ func (h *Handle) LinkList() ([]Link, error) { attr := nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(nl.RTEXT_FILTER_VF)) req.AddData(attr) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []Link @@ -2345,7 +2365,7 @@ func (h *Handle) LinkList() ([]Link, error) { res = append(res, link) } - return res, nil + return res, executeErr } // LinkUpdate is used to pass information back from LinkSubscribe() @@ -2381,6 +2401,10 @@ type LinkSubscribeOptions struct { // LinkSubscribeWithOptions work like LinkSubscribe but enable to // provide additional options to modify the behavior. Currently, the // namespace can be provided as well as an error callback. +// +// When options.ListExisting is true, options.ErrorCallback may be +// called with [ErrDumpInterrupted] to indicate that results from +// the initial dump of links may be inconsistent or incomplete. 
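The list and lookup changes above mean a caller can now choose between accepting a possibly-incomplete link dump and retrying it; because errDumpInterrupted also matches unix.EINTR, code written against the old EINTR behaviour keeps working. A hedged caller-side sketch (not part of the vendored code):

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	links, err := netlink.LinkList()
	if err != nil {
		if !errors.Is(err, netlink.ErrDumpInterrupted) {
			log.Fatalf("link dump failed: %v", err)
		}
		// Backward compatibility: the sentinel also matches EINTR, so callers
		// written against the old behaviour still detect the condition.
		_ = errors.Is(err, unix.EINTR) // true for ErrDumpInterrupted
		log.Println("link dump interrupted; list may be incomplete")
	}
	for _, l := range links {
		fmt.Println(l.Attrs().Name)
	}
}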
func LinkSubscribeWithOptions(ch chan<- LinkUpdate, done <-chan struct{}, options LinkSubscribeOptions) error { if options.Namespace == nil { none := netns.None() @@ -2440,6 +2464,9 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c continue } for _, m := range msgs { + if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 && cberr != nil { + cberr(ErrDumpInterrupted) + } if m.Header.Type == unix.NLMSG_DONE { continue } @@ -2649,6 +2676,8 @@ func addNetkitAttrs(nk *Netkit, linkInfo *nl.RtAttr, flag int) error { data.AddRtAttr(nl.IFLA_NETKIT_MODE, nl.Uint32Attr(uint32(nk.Mode))) data.AddRtAttr(nl.IFLA_NETKIT_POLICY, nl.Uint32Attr(uint32(nk.Policy))) data.AddRtAttr(nl.IFLA_NETKIT_PEER_POLICY, nl.Uint32Attr(uint32(nk.PeerPolicy))) + data.AddRtAttr(nl.IFLA_NETKIT_SCRUB, nl.Uint32Attr(uint32(nk.Scrub))) + data.AddRtAttr(nl.IFLA_NETKIT_PEER_SCRUB, nl.Uint32Attr(uint32(nk.PeerScrub))) if (flag & unix.NLM_F_EXCL) == 0 { // Modifying peer link attributes will not take effect @@ -2709,6 +2738,12 @@ func parseNetkitData(link Link, data []syscall.NetlinkRouteAttr) { netkit.Policy = NetkitPolicy(native.Uint32(datum.Value[0:4])) case nl.IFLA_NETKIT_PEER_POLICY: netkit.PeerPolicy = NetkitPolicy(native.Uint32(datum.Value[0:4])) + case nl.IFLA_NETKIT_SCRUB: + netkit.supportsScrub = true + netkit.Scrub = NetkitScrub(native.Uint32(datum.Value[0:4])) + case nl.IFLA_NETKIT_PEER_SCRUB: + netkit.supportsScrub = true + netkit.PeerScrub = NetkitScrub(native.Uint32(datum.Value[0:4])) } } } diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go index 2d93044a6e..1c6f2958ae 100644 --- a/vendor/github.com/vishvananda/netlink/neigh_linux.go +++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "net" "syscall" @@ -206,6 +207,9 @@ func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error { // NeighList returns a list of IP-MAC mappings in the system (ARP table). // Equivalent to: `ip neighbor show`. // The list can be filtered by link and ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func NeighList(linkIndex, family int) ([]Neigh, error) { return pkgHandle.NeighList(linkIndex, family) } @@ -213,6 +217,9 @@ func NeighList(linkIndex, family int) ([]Neigh, error) { // NeighProxyList returns a list of neighbor proxies in the system. // Equivalent to: `ip neighbor show proxy`. // The list can be filtered by link and ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func NeighProxyList(linkIndex, family int) ([]Neigh, error) { return pkgHandle.NeighProxyList(linkIndex, family) } @@ -220,6 +227,9 @@ func NeighProxyList(linkIndex, family int) ([]Neigh, error) { // NeighList returns a list of IP-MAC mappings in the system (ARP table). // Equivalent to: `ip neighbor show`. // The list can be filtered by link and ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) NeighList(linkIndex, family int) ([]Neigh, error) { return h.NeighListExecute(Ndmsg{ Family: uint8(family), @@ -230,6 +240,9 @@ func (h *Handle) NeighList(linkIndex, family int) ([]Neigh, error) { // NeighProxyList returns a list of neighbor proxies in the system. // Equivalent to: `ip neighbor show proxy`. // The list can be filtered by link, ip family. 
+// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) { return h.NeighListExecute(Ndmsg{ Family: uint8(family), @@ -239,18 +252,24 @@ func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) { } // NeighListExecute returns a list of neighbour entries filtered by link, ip family, flag and state. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func NeighListExecute(msg Ndmsg) ([]Neigh, error) { return pkgHandle.NeighListExecute(msg) } // NeighListExecute returns a list of neighbour entries filtered by link, ip family, flag and state. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) NeighListExecute(msg Ndmsg) ([]Neigh, error) { req := h.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP) req.AddData(&msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNEIGH) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNEIGH) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []Neigh @@ -281,7 +300,7 @@ func (h *Handle) NeighListExecute(msg Ndmsg) ([]Neigh, error) { res = append(res, *neigh) } - return res, nil + return res, executeErr } func NeighDeserialize(m []byte) (*Neigh, error) { @@ -364,6 +383,10 @@ type NeighSubscribeOptions struct { // NeighSubscribeWithOptions work like NeighSubscribe but enable to // provide additional options to modify the behavior. Currently, the // namespace can be provided as well as an error callback. +// +// When options.ListExisting is true, options.ErrorCallback may be +// called with [ErrDumpInterrupted] to indicate that results from +// the initial dump of links may be inconsistent or incomplete. func NeighSubscribeWithOptions(ch chan<- NeighUpdate, done <-chan struct{}, options NeighSubscribeOptions) error { if options.Namespace == nil { none := netns.None() @@ -428,6 +451,9 @@ func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done < continue } for _, m := range msgs { + if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 && cberr != nil { + cberr(ErrDumpInterrupted) + } if m.Header.Type == unix.NLMSG_DONE { if listExisting { // This will be called after handling AF_UNSPEC diff --git a/vendor/github.com/vishvananda/netlink/netlink_linux.go b/vendor/github.com/vishvananda/netlink/netlink_linux.go index a20d293d87..7416e30510 100644 --- a/vendor/github.com/vishvananda/netlink/netlink_linux.go +++ b/vendor/github.com/vishvananda/netlink/netlink_linux.go @@ -9,3 +9,6 @@ const ( FAMILY_V6 = nl.FAMILY_V6 FAMILY_MPLS = nl.FAMILY_MPLS ) + +// ErrDumpInterrupted is an alias for [nl.ErrDumpInterrupted]. 
+var ErrDumpInterrupted = nl.ErrDumpInterrupted diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go index 0b5be470cb..6dfa16cc28 100644 --- a/vendor/github.com/vishvananda/netlink/nl/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -38,6 +38,8 @@ const ( IFLA_NETKIT_POLICY IFLA_NETKIT_PEER_POLICY IFLA_NETKIT_MODE + IFLA_NETKIT_SCRUB + IFLA_NETKIT_PEER_SCRUB IFLA_NETKIT_MAX = IFLA_NETKIT_MODE ) diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go index 6cecc4517a..4d2732a9e8 100644 --- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -4,6 +4,7 @@ package nl import ( "bytes" "encoding/binary" + "errors" "fmt" "net" "os" @@ -11,6 +12,7 @@ import ( "sync" "sync/atomic" "syscall" + "time" "unsafe" "github.com/vishvananda/netns" @@ -43,6 +45,26 @@ var SocketTimeoutTv = unix.Timeval{Sec: 60, Usec: 0} // ErrorMessageReporting is the default error message reporting configuration for the new netlink sockets var EnableErrorMessageReporting bool = false +// ErrDumpInterrupted is an instance of errDumpInterrupted, used to report that +// a netlink function has set the NLM_F_DUMP_INTR flag in a response message, +// indicating that the results may be incomplete or inconsistent. +var ErrDumpInterrupted = errDumpInterrupted{} + +// errDumpInterrupted is an error type, used to report that NLM_F_DUMP_INTR was +// set in a netlink response. +type errDumpInterrupted struct{} + +func (errDumpInterrupted) Error() string { + return "results may be incomplete or inconsistent" +} + +// Before errDumpInterrupted was introduced, EINTR was returned when a netlink +// response had NLM_F_DUMP_INTR. Retain backward compatibility with code that +// may be checking for EINTR using Is. +func (e errDumpInterrupted) Is(target error) bool { + return target == unix.EINTR +} + // GetIPFamily returns the family type of a net.IP. func GetIPFamily(ip net.IP) int { if len(ip) <= net.IPv4len { @@ -492,22 +514,26 @@ func (req *NetlinkRequest) AddRawData(data []byte) { // Execute the request against the given sockType. // Returns a list of netlink messages in serialized format, optionally filtered // by resType. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, error) { var res [][]byte err := req.ExecuteIter(sockType, resType, func(msg []byte) bool { res = append(res, msg) return true }) - if err != nil { + if err != nil && !errors.Is(err, ErrDumpInterrupted) { return nil, err } - return res, nil + return res, err } // ExecuteIter executes the request against the given sockType. // Calls the provided callback func once for each netlink message. // If the callback returns false, it is not called again, but // the remaining messages are consumed/discarded. +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
// // Thread safety: ExecuteIter holds a lock on the socket until // it finishes iteration so the callback must not call back into @@ -559,6 +585,8 @@ func (req *NetlinkRequest) ExecuteIter(sockType int, resType uint16, f func(msg return err } + dumpIntr := false + done: for { msgs, from, err := s.Receive() @@ -580,7 +608,7 @@ done: } if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 { - return syscall.Errno(unix.EINTR) + dumpIntr = true } if m.Header.Type == unix.NLMSG_DONE || m.Header.Type == unix.NLMSG_ERROR { @@ -634,6 +662,9 @@ done: } } } + if dumpIntr { + return ErrDumpInterrupted + } return nil } @@ -656,9 +687,11 @@ func NewNetlinkRequest(proto, flags int) *NetlinkRequest { } type NetlinkSocket struct { - fd int32 - file *os.File - lsa unix.SockaddrNetlink + fd int32 + file *os.File + lsa unix.SockaddrNetlink + sendTimeout int64 // Access using atomic.Load/StoreInt64 + receiveTimeout int64 // Access using atomic.Load/StoreInt64 sync.Mutex } @@ -802,8 +835,44 @@ func (s *NetlinkSocket) GetFd() int { return int(s.fd) } +func (s *NetlinkSocket) GetTimeouts() (send, receive time.Duration) { + return time.Duration(atomic.LoadInt64(&s.sendTimeout)), + time.Duration(atomic.LoadInt64(&s.receiveTimeout)) +} + func (s *NetlinkSocket) Send(request *NetlinkRequest) error { - return unix.Sendto(int(s.fd), request.Serialize(), 0, &s.lsa) + rawConn, err := s.file.SyscallConn() + if err != nil { + return err + } + var ( + deadline time.Time + innerErr error + ) + sendTimeout := atomic.LoadInt64(&s.sendTimeout) + if sendTimeout != 0 { + deadline = time.Now().Add(time.Duration(sendTimeout)) + } + if err := s.file.SetWriteDeadline(deadline); err != nil { + return err + } + serializedReq := request.Serialize() + err = rawConn.Write(func(fd uintptr) (done bool) { + innerErr = unix.Sendto(int(s.fd), serializedReq, 0, &s.lsa) + return innerErr != unix.EWOULDBLOCK + }) + if innerErr != nil { + return innerErr + } + if err != nil { + // The timeout was previously implemented using SO_SNDTIMEO on a blocking + // socket. So, continue to return EAGAIN when the timeout is reached. + if errors.Is(err, os.ErrDeadlineExceeded) { + return unix.EAGAIN + } + return err + } + return nil } func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetlink, error) { @@ -812,20 +881,33 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetli return nil, nil, err } var ( + deadline time.Time fromAddr *unix.SockaddrNetlink rb [RECEIVE_BUFFER_SIZE]byte nr int from unix.Sockaddr innerErr error ) + receiveTimeout := atomic.LoadInt64(&s.receiveTimeout) + if receiveTimeout != 0 { + deadline = time.Now().Add(time.Duration(receiveTimeout)) + } + if err := s.file.SetReadDeadline(deadline); err != nil { + return nil, nil, err + } err = rawConn.Read(func(fd uintptr) (done bool) { nr, from, innerErr = unix.Recvfrom(int(fd), rb[:], 0) return innerErr != unix.EWOULDBLOCK }) if innerErr != nil { - err = innerErr + return nil, nil, innerErr } if err != nil { + // The timeout was previously implemented using SO_RCVTIMEO on a blocking + // socket. So, continue to return EAGAIN when the timeout is reached. 
+ if errors.Is(err, os.ErrDeadlineExceeded) { + return nil, nil, unix.EAGAIN + } return nil, nil, err } fromAddr, ok := from.(*unix.SockaddrNetlink) @@ -847,16 +929,14 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetli // SetSendTimeout allows to set a send timeout on the socket func (s *NetlinkSocket) SetSendTimeout(timeout *unix.Timeval) error { - // Set a send timeout of SOCKET_SEND_TIMEOUT, this will allow the Send to periodically unblock and avoid that a routine - // remains stuck on a send on a closed fd - return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_SNDTIMEO, timeout) + atomic.StoreInt64(&s.sendTimeout, timeout.Nano()) + return nil } // SetReceiveTimeout allows to set a receive timeout on the socket func (s *NetlinkSocket) SetReceiveTimeout(timeout *unix.Timeval) error { - // Set a read timeout of SOCKET_READ_TIMEOUT, this will allow the Read to periodically unblock and avoid that a routine - // remains stuck on a recvmsg on a closed fd - return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_RCVTIMEO, timeout) + atomic.StoreInt64(&s.receiveTimeout, timeout.Nano()) + return nil } // SetReceiveBufferSize allows to set a receive buffer size on the socket diff --git a/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/github.com/vishvananda/netlink/protinfo_linux.go index 1ba25d3cd4..aa51e3b470 100644 --- a/vendor/github.com/vishvananda/netlink/protinfo_linux.go +++ b/vendor/github.com/vishvananda/netlink/protinfo_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "syscall" @@ -8,10 +9,14 @@ import ( "golang.org/x/sys/unix" ) +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func LinkGetProtinfo(link Link) (Protinfo, error) { return pkgHandle.LinkGetProtinfo(link) } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) { base := link.Attrs() h.ensureIndex(base) @@ -19,9 +24,9 @@ func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) { req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) msg := nl.NewIfInfomsg(unix.AF_BRIDGE) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, 0) - if err != nil { - return pi, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return pi, executeErr } for _, m := range msgs { @@ -43,7 +48,7 @@ func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) { } pi = parseProtinfo(infos) - return pi, nil + return pi, executeErr } } return pi, fmt.Errorf("Device with index %d not found", base.Index) diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go index e732ae3bd6..22cf0e5825 100644 --- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go +++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "io/ioutil" "strconv" @@ -338,6 +339,9 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { // QdiscList gets a list of qdiscs in the system. // Equivalent to: `tc qdisc show`. // The list can be filtered by link. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
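The NetlinkSocket changes above replace the SO_SNDTIMEO/SO_RCVTIMEO socket options with read/write deadlines on the underlying *os.File, while keeping the old caller-visible contract of returning unix.EAGAIN when the timeout fires. A rough sketch of that contract from the nl package's side (illustrative; assumes a quiet NETLINK_ROUTE subscription so Receive actually times out, and a 2-second timeout chosen arbitrarily):

package main

import (
	"log"

	"github.com/vishvananda/netlink/nl"
	"golang.org/x/sys/unix"
)

func main() {
	sock, err := nl.Subscribe(unix.NETLINK_ROUTE, unix.RTNLGRP_LINK)
	if err != nil {
		log.Fatalf("subscribe failed: %v", err)
	}
	defer sock.Close()

	// The timeout is stored atomically and applied as a deadline before each
	// blocking read; GetTimeouts exposes the currently configured values.
	if err := sock.SetReceiveTimeout(&unix.Timeval{Sec: 2}); err != nil {
		log.Fatal(err)
	}
	send, recv := sock.GetTimeouts()
	log.Printf("timeouts: send=%v receive=%v", send, recv)

	if _, _, err := sock.Receive(); err == unix.EAGAIN {
		// Same errno the previous SO_RCVTIMEO-based implementation returned.
		log.Println("receive timed out")
	} else if err != nil {
		log.Fatalf("receive failed: %v", err)
	}
}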
func QdiscList(link Link) ([]Qdisc, error) { return pkgHandle.QdiscList(link) } @@ -345,6 +349,9 @@ func QdiscList(link Link) ([]Qdisc, error) { // QdiscList gets a list of qdiscs in the system. // Equivalent to: `tc qdisc show`. // The list can be filtered by link. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { req := h.newNetlinkRequest(unix.RTM_GETQDISC, unix.NLM_F_DUMP) index := int32(0) @@ -359,9 +366,9 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { } req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWQDISC) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWQDISC) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []Qdisc @@ -497,7 +504,7 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { res = append(res, qdisc) } - return res, nil + return res, executeErr } func parsePfifoFastData(qdisc Qdisc, value []byte) error { diff --git a/vendor/github.com/vishvananda/netlink/rdma_link_linux.go b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go index 036399db6b..9bb7507321 100644 --- a/vendor/github.com/vishvananda/netlink/rdma_link_linux.go +++ b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go @@ -3,6 +3,7 @@ package netlink import ( "bytes" "encoding/binary" + "errors" "fmt" "net" @@ -85,19 +86,25 @@ func execRdmaSetLink(req *nl.NetlinkRequest) error { // RdmaLinkList gets a list of RDMA link devices. // Equivalent to: `rdma dev show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func RdmaLinkList() ([]*RdmaLink, error) { return pkgHandle.RdmaLinkList() } // RdmaLinkList gets a list of RDMA link devices. // Equivalent to: `rdma dev show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) RdmaLinkList() ([]*RdmaLink, error) { proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_GET) req := h.newNetlinkRequest(proto, unix.NLM_F_ACK|unix.NLM_F_DUMP) - msgs, err := req.Execute(unix.NETLINK_RDMA, 0) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_RDMA, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []*RdmaLink @@ -109,17 +116,23 @@ func (h *Handle) RdmaLinkList() ([]*RdmaLink, error) { res = append(res, link) } - return res, nil + return res, executeErr } // RdmaLinkByName finds a link by name and returns a pointer to the object if // found and nil error, otherwise returns error code. +// +// If the returned error is [ErrDumpInterrupted], the result may be missing or +// outdated and the caller should retry. func RdmaLinkByName(name string) (*RdmaLink, error) { return pkgHandle.RdmaLinkByName(name) } // RdmaLinkByName finds a link by name and returns a pointer to the object if // found and nil error, otherwise returns error code. +// +// If the returned error is [ErrDumpInterrupted], the result may be missing or +// outdated and the caller should retry. func (h *Handle) RdmaLinkByName(name string) (*RdmaLink, error) { links, err := h.RdmaLinkList() if err != nil { @@ -288,6 +301,8 @@ func RdmaLinkDel(name string) error { } // RdmaLinkDel deletes an rdma link. +// +// If the returned error is [ErrDumpInterrupted], the caller should retry. 
func (h *Handle) RdmaLinkDel(name string) error { link, err := h.RdmaLinkByName(name) if err != nil { @@ -307,6 +322,7 @@ func (h *Handle) RdmaLinkDel(name string) error { // RdmaLinkAdd adds an rdma link for the specified type to the network device. // Similar to: rdma link add NAME type TYPE netdev NETDEV +// // NAME - specifies the new name of the rdma link to add // TYPE - specifies which rdma type to use. Link types: // rxe - Soft RoCE driver diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go index 0cd4f8363a..28a132a2f0 100644 --- a/vendor/github.com/vishvananda/netlink/route_linux.go +++ b/vendor/github.com/vishvananda/netlink/route_linux.go @@ -3,6 +3,7 @@ package netlink import ( "bytes" "encoding/binary" + "errors" "fmt" "net" "strconv" @@ -1163,6 +1164,9 @@ func (h *Handle) prepareRouteReq(route *Route, req *nl.NetlinkRequest, msg *nl.R // RouteList gets a list of routes in the system. // Equivalent to: `ip route show`. // The list can be filtered by link and ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func RouteList(link Link, family int) ([]Route, error) { return pkgHandle.RouteList(link, family) } @@ -1170,6 +1174,9 @@ func RouteList(link Link, family int) ([]Route, error) { // RouteList gets a list of routes in the system. // Equivalent to: `ip route show`. // The list can be filtered by link and ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) RouteList(link Link, family int) ([]Route, error) { routeFilter := &Route{} if link != nil { @@ -1188,6 +1195,9 @@ func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, e // RouteListFiltered gets a list of routes in the system filtered with specified rules. // All rules must be defined in RouteFilter struct +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { var res []Route err := h.RouteListFilteredIter(family, filter, filterMask, func(route Route) (cont bool) { @@ -1202,17 +1212,22 @@ func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) // RouteListFilteredIter passes each route that matches the filter to the given iterator func. Iteration continues // until all routes are loaded or the func returns false. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func RouteListFilteredIter(family int, filter *Route, filterMask uint64, f func(Route) (cont bool)) error { return pkgHandle.RouteListFilteredIter(family, filter, filterMask, f) } +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) RouteListFilteredIter(family int, filter *Route, filterMask uint64, f func(Route) (cont bool)) error { req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP) rtmsg := &nl.RtMsg{} rtmsg.Family = uint8(family) var parseErr error - err := h.routeHandleIter(filter, req, rtmsg, func(m []byte) bool { + executeErr := h.routeHandleIter(filter, req, rtmsg, func(m []byte) bool { msg := nl.DeserializeRtMsg(m) if family != FAMILY_ALL && msg.Family != uint8(family) { // Ignore routes not matching requested family @@ -1270,13 +1285,13 @@ func (h *Handle) RouteListFilteredIter(family int, filter *Route, filterMask uin } return f(route) }) - if err != nil { - return err + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return executeErr } if parseErr != nil { return parseErr } - return nil + return executeErr } // deserializeRoute decodes a binary netlink message into a Route struct @@ -1684,6 +1699,10 @@ type RouteSubscribeOptions struct { // RouteSubscribeWithOptions work like RouteSubscribe but enable to // provide additional options to modify the behavior. Currently, the // namespace can be provided as well as an error callback. +// +// When options.ListExisting is true, options.ErrorCallback may be +// called with [ErrDumpInterrupted] to indicate that results from +// the initial dump of links may be inconsistent or incomplete. func RouteSubscribeWithOptions(ch chan<- RouteUpdate, done <-chan struct{}, options RouteSubscribeOptions) error { if options.Namespace == nil { none := netns.None() @@ -1743,6 +1762,9 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done < continue } for _, m := range msgs { + if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 && cberr != nil { + cberr(ErrDumpInterrupted) + } if m.Header.Type == unix.NLMSG_DONE { continue } diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go index ddff99cfad..dba99147b2 100644 --- a/vendor/github.com/vishvananda/netlink/rule_linux.go +++ b/vendor/github.com/vishvananda/netlink/rule_linux.go @@ -2,6 +2,7 @@ package netlink import ( "bytes" + "errors" "fmt" "net" @@ -183,12 +184,18 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { // RuleList lists rules in the system. // Equivalent to: ip rule list +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func RuleList(family int) ([]Rule, error) { return pkgHandle.RuleList(family) } // RuleList lists rules in the system. // Equivalent to: ip rule list +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) RuleList(family int) ([]Rule, error) { return h.RuleListFiltered(family, nil, 0) } @@ -196,20 +203,26 @@ func (h *Handle) RuleList(family int) ([]Rule, error) { // RuleListFiltered gets a list of rules in the system filtered by the // specified rule template `filter`. // Equivalent to: ip rule list +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func RuleListFiltered(family int, filter *Rule, filterMask uint64) ([]Rule, error) { return pkgHandle.RuleListFiltered(family, filter, filterMask) } // RuleListFiltered lists rules in the system. // Equivalent to: ip rule list +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
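Subscriptions get the same treatment: with ListExisting set, an interrupted initial dump is now reported through ErrorCallback as ErrDumpInterrupted rather than aborting the replay. One way a subscriber might tolerate that, sketched under the assumption that partial initial state is acceptable (not part of the vendored code):

package main

import (
	"errors"
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	updates := make(chan netlink.RouteUpdate, 64)
	done := make(chan struct{})
	defer close(done)

	opts := netlink.RouteSubscribeOptions{
		ListExisting: true,
		ErrorCallback: func(err error) {
			if errors.Is(err, netlink.ErrDumpInterrupted) {
				// The initial dump raced with a routing table change; the
				// replayed routes may be incomplete, but the subscription
				// itself keeps running.
				log.Println("initial route dump interrupted")
				return
			}
			log.Printf("route subscription error: %v", err)
		},
	}
	if err := netlink.RouteSubscribeWithOptions(updates, done, opts); err != nil {
		log.Fatalf("subscribe failed: %v", err)
	}
	for u := range updates {
		log.Printf("route update: type=%d dst=%v", u.Type, u.Route.Dst)
	}
}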
func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ([]Rule, error) { req := h.newNetlinkRequest(unix.RTM_GETRULE, unix.NLM_F_DUMP|unix.NLM_F_REQUEST) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWRULE) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWRULE) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res = make([]Rule, 0) @@ -306,7 +319,7 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ( res = append(res, *rule) } - return res, nil + return res, executeErr } func (pr *RulePortRange) toRtAttrData() []byte { diff --git a/vendor/github.com/vishvananda/netlink/socket_linux.go b/vendor/github.com/vishvananda/netlink/socket_linux.go index 4eb4aeafbd..82891bc2e0 100644 --- a/vendor/github.com/vishvananda/netlink/socket_linux.go +++ b/vendor/github.com/vishvananda/netlink/socket_linux.go @@ -157,6 +157,9 @@ func (u *UnixSocket) deserialize(b []byte) error { } // SocketGet returns the Socket identified by its local and remote addresses. +// +// If the returned error is [ErrDumpInterrupted], the search for a result may +// be incomplete and the caller should retry. func (h *Handle) SocketGet(local, remote net.Addr) (*Socket, error) { var protocol uint8 var localIP, remoteIP net.IP @@ -232,6 +235,9 @@ func (h *Handle) SocketGet(local, remote net.Addr) (*Socket, error) { } // SocketGet returns the Socket identified by its local and remote addresses. +// +// If the returned error is [ErrDumpInterrupted], the search for a result may +// be incomplete and the caller should retry. func SocketGet(local, remote net.Addr) (*Socket, error) { return pkgHandle.SocketGet(local, remote) } @@ -283,6 +289,9 @@ func SocketDestroy(local, remote net.Addr) error { } // SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type and return with extension TCP info. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) { // Construct the request req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) @@ -295,9 +304,9 @@ func (h *Handle) SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) // Do the query and parse the result var result []*InetDiagTCPInfoResp - var err error - err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { sockInfo := &Socket{} + var err error if err = sockInfo.deserialize(msg); err != nil { return false } @@ -315,18 +324,24 @@ func (h *Handle) SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) return true }) - if err != nil { - return nil, err + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } - return result, nil + return result, executeErr } // SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type and return with extension TCP info. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) { return pkgHandle.SocketDiagTCPInfo(family) } // SocketDiagTCP requests INET_DIAG_INFO for TCP protocol for specified family type and return related socket. 
+// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) SocketDiagTCP(family uint8) ([]*Socket, error) { // Construct the request req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) @@ -339,27 +354,32 @@ func (h *Handle) SocketDiagTCP(family uint8) ([]*Socket, error) { // Do the query and parse the result var result []*Socket - var err error - err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { sockInfo := &Socket{} - if err = sockInfo.deserialize(msg); err != nil { + if err := sockInfo.deserialize(msg); err != nil { return false } result = append(result, sockInfo) return true }) - if err != nil { - return nil, err + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } - return result, nil + return result, executeErr } // SocketDiagTCP requests INET_DIAG_INFO for TCP protocol for specified family type and return related socket. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func SocketDiagTCP(family uint8) ([]*Socket, error) { return pkgHandle.SocketDiagTCP(family) } // SocketDiagUDPInfo requests INET_DIAG_INFO for UDP protocol for specified family type and return with extension info. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) { // Construct the request var extensions uint8 @@ -377,14 +397,14 @@ func (h *Handle) SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) // Do the query and parse the result var result []*InetDiagUDPInfoResp - var err error - err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { sockInfo := &Socket{} - if err = sockInfo.deserialize(msg); err != nil { + if err := sockInfo.deserialize(msg); err != nil { return false } var attrs []syscall.NetlinkRouteAttr + var err error if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil { return false } @@ -397,18 +417,24 @@ func (h *Handle) SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) result = append(result, res) return true }) - if err != nil { - return nil, err + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } - return result, nil + return result, executeErr } // SocketDiagUDPInfo requests INET_DIAG_INFO for UDP protocol for specified family type and return with extension info. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) { return pkgHandle.SocketDiagUDPInfo(family) } // SocketDiagUDP requests INET_DIAG_INFO for UDP protocol for specified family type and return related socket. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) SocketDiagUDP(family uint8) ([]*Socket, error) { // Construct the request req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) @@ -421,27 +447,32 @@ func (h *Handle) SocketDiagUDP(family uint8) ([]*Socket, error) { // Do the query and parse the result var result []*Socket - var err error - err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { sockInfo := &Socket{} - if err = sockInfo.deserialize(msg); err != nil { + if err := sockInfo.deserialize(msg); err != nil { return false } result = append(result, sockInfo) return true }) - if err != nil { - return nil, err + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } - return result, nil + return result, executeErr } // SocketDiagUDP requests INET_DIAG_INFO for UDP protocol for specified family type and return related socket. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func SocketDiagUDP(family uint8) ([]*Socket, error) { return pkgHandle.SocketDiagUDP(family) } // UnixSocketDiagInfo requests UNIX_DIAG_INFO for unix sockets and return with extension info. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { // Construct the request var extensions uint8 @@ -456,10 +487,9 @@ func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { }) var result []*UnixDiagInfoResp - var err error - err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { sockInfo := &UnixSocket{} - if err = sockInfo.deserialize(msg); err != nil { + if err := sockInfo.deserialize(msg); err != nil { return false } @@ -469,6 +499,7 @@ func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { } var attrs []syscall.NetlinkRouteAttr + var err error if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil { return false } @@ -480,18 +511,24 @@ func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { result = append(result, res) return true }) - if err != nil { - return nil, err + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } - return result, nil + return result, executeErr } // UnixSocketDiagInfo requests UNIX_DIAG_INFO for unix sockets and return with extension info. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { return pkgHandle.UnixSocketDiagInfo() } // UnixSocketDiag requests UNIX_DIAG_INFO for unix sockets. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) UnixSocketDiag() ([]*UnixSocket, error) { // Construct the request req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) @@ -501,10 +538,9 @@ func (h *Handle) UnixSocketDiag() ([]*UnixSocket, error) { }) var result []*UnixSocket - var err error - err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + executeErr := req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { sockInfo := &UnixSocket{} - if err = sockInfo.deserialize(msg); err != nil { + if err := sockInfo.deserialize(msg); err != nil { return false } @@ -514,13 +550,16 @@ func (h *Handle) UnixSocketDiag() ([]*UnixSocket, error) { } return true }) - if err != nil { - return nil, err + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } - return result, nil + return result, executeErr } // UnixSocketDiag requests UNIX_DIAG_INFO for unix sockets. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func UnixSocketDiag() ([]*UnixSocket, error) { return pkgHandle.UnixSocketDiag() } diff --git a/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go b/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go index 20c82f9c76..c1dd00a864 100644 --- a/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go +++ b/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go @@ -52,8 +52,10 @@ func (s *XDPSocket) deserialize(b []byte) error { return nil } -// XDPSocketGet returns the XDP socket identified by its inode number and/or +// SocketXDPGetInfo returns the XDP socket identified by its inode number and/or // socket cookie. Specify the cookie as SOCK_ANY_COOKIE if +// +// If the returned error is [ErrDumpInterrupted], the caller should retry. func SocketXDPGetInfo(ino uint32, cookie uint64) (*XDPDiagInfoResp, error) { // We have a problem here: dumping AF_XDP sockets currently does not support // filtering. We thus need to dump all XSKs and then only filter afterwards @@ -85,6 +87,9 @@ func SocketXDPGetInfo(ino uint32, cookie uint64) (*XDPDiagInfoResp, error) { } // SocketDiagXDP requests XDP_DIAG_INFO for XDP family sockets. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func SocketDiagXDP() ([]*XDPDiagInfoResp, error) { var result []*XDPDiagInfoResp err := socketDiagXDPExecutor(func(m syscall.NetlinkMessage) error { @@ -105,10 +110,10 @@ func SocketDiagXDP() ([]*XDPDiagInfoResp, error) { result = append(result, res) return nil }) - if err != nil { + if err != nil && !errors.Is(err, ErrDumpInterrupted) { return nil, err } - return result, nil + return result, err } // socketDiagXDPExecutor requests XDP_DIAG_INFO for XDP family sockets. 
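The socket-diagnostics dumps follow the same pattern, so a caller can either keep the partial result or retry the dump a few times before settling for it. An illustrative sketch (the retry count of three is arbitrary):

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	// Retry if the kernel reports an inconsistent dump, then fall back to
	// whatever was returned on the last attempt.
	var infos []*netlink.InetDiagTCPInfoResp
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		infos, err = netlink.SocketDiagTCPInfo(unix.AF_INET)
		if !errors.Is(err, netlink.ErrDumpInterrupted) {
			break
		}
	}
	if err != nil && !errors.Is(err, netlink.ErrDumpInterrupted) {
		log.Fatalf("socket diag failed: %v", err)
	}
	fmt.Printf("collected %d TCP sockets\n", len(infos))
}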
@@ -128,6 +133,7 @@ func socketDiagXDPExecutor(receiver func(syscall.NetlinkMessage) error) error { return err } + dumpIntr := false loop: for { msgs, from, err := s.Receive() @@ -142,6 +148,9 @@ loop: } for _, m := range msgs { + if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 { + dumpIntr = true + } switch m.Header.Type { case unix.NLMSG_DONE: break loop @@ -154,6 +163,9 @@ loop: } } } + if dumpIntr { + return ErrDumpInterrupted + } return nil } diff --git a/vendor/github.com/vishvananda/netlink/vdpa_linux.go b/vendor/github.com/vishvananda/netlink/vdpa_linux.go index 7c15986d0f..c14877a295 100644 --- a/vendor/github.com/vishvananda/netlink/vdpa_linux.go +++ b/vendor/github.com/vishvananda/netlink/vdpa_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "net" "syscall" @@ -118,6 +119,9 @@ func VDPADelDev(name string) error { // VDPAGetDevList returns list of VDPA devices // Equivalent to: `vdpa dev show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func VDPAGetDevList() ([]*VDPADev, error) { return pkgHandle.VDPAGetDevList() } @@ -130,6 +134,9 @@ func VDPAGetDevByName(name string) (*VDPADev, error) { // VDPAGetDevConfigList returns list of VDPA devices configurations // Equivalent to: `vdpa dev config show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func VDPAGetDevConfigList() ([]*VDPADevConfig, error) { return pkgHandle.VDPAGetDevConfigList() } @@ -148,6 +155,9 @@ func VDPAGetDevVStats(name string, queueIndex uint32) (*VDPADevVStats, error) { // VDPAGetMGMTDevList returns list of mgmt devices // Equivalent to: `vdpa mgmtdev show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func VDPAGetMGMTDevList() ([]*VDPAMGMTDev, error) { return pkgHandle.VDPAGetMGMTDevList() } @@ -261,9 +271,9 @@ func (h *Handle) vdpaRequest(command uint8, extraFlags int, attrs []*nl.RtAttr) req.AddData(a) } - resp, err := req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return nil, err + resp, executeErr := req.Execute(unix.NETLINK_GENERIC, 0) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } messages := make([]vdpaNetlinkMessage, 0, len(resp)) for _, m := range resp { @@ -273,10 +283,13 @@ func (h *Handle) vdpaRequest(command uint8, extraFlags int, attrs []*nl.RtAttr) } messages = append(messages, attrs) } - return messages, nil + return messages, executeErr } // dump all devices if dev is nil +// +// If dev is nil and the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) vdpaDevGet(dev *string) ([]*VDPADev, error) { var extraFlags int var attrs []*nl.RtAttr @@ -285,9 +298,9 @@ func (h *Handle) vdpaDevGet(dev *string) ([]*VDPADev, error) { } else { extraFlags = extraFlags | unix.NLM_F_DUMP } - messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_GET, extraFlags, attrs) - if err != nil { - return nil, err + messages, executeErr := h.vdpaRequest(nl.VDPA_CMD_DEV_GET, extraFlags, attrs) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } devs := make([]*VDPADev, 0, len(messages)) for _, m := range messages { @@ -295,10 +308,13 @@ func (h *Handle) vdpaDevGet(dev *string) ([]*VDPADev, error) { d.parseAttributes(m) devs = append(devs, d) } - return devs, nil + return devs, executeErr } // dump all devices if dev is nil +// +// If dev is nil, and the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) vdpaDevConfigGet(dev *string) ([]*VDPADevConfig, error) { var extraFlags int var attrs []*nl.RtAttr @@ -307,9 +323,9 @@ func (h *Handle) vdpaDevConfigGet(dev *string) ([]*VDPADevConfig, error) { } else { extraFlags = extraFlags | unix.NLM_F_DUMP } - messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_CONFIG_GET, extraFlags, attrs) - if err != nil { - return nil, err + messages, executeErr := h.vdpaRequest(nl.VDPA_CMD_DEV_CONFIG_GET, extraFlags, attrs) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } cfgs := make([]*VDPADevConfig, 0, len(messages)) for _, m := range messages { @@ -317,10 +333,13 @@ func (h *Handle) vdpaDevConfigGet(dev *string) ([]*VDPADevConfig, error) { cfg.parseAttributes(m) cfgs = append(cfgs, cfg) } - return cfgs, nil + return cfgs, executeErr } // dump all devices if dev is nil +// +// If dev is nil and the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) vdpaMGMTDevGet(bus, dev *string) ([]*VDPAMGMTDev, error) { var extraFlags int var attrs []*nl.RtAttr @@ -336,9 +355,9 @@ func (h *Handle) vdpaMGMTDevGet(bus, dev *string) ([]*VDPAMGMTDev, error) { } else { extraFlags = extraFlags | unix.NLM_F_DUMP } - messages, err := h.vdpaRequest(nl.VDPA_CMD_MGMTDEV_GET, extraFlags, attrs) - if err != nil { - return nil, err + messages, executeErr := h.vdpaRequest(nl.VDPA_CMD_MGMTDEV_GET, extraFlags, attrs) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } cfgs := make([]*VDPAMGMTDev, 0, len(messages)) for _, m := range messages { @@ -346,7 +365,7 @@ func (h *Handle) vdpaMGMTDevGet(bus, dev *string) ([]*VDPAMGMTDev, error) { cfg.parseAttributes(m) cfgs = append(cfgs, cfg) } - return cfgs, nil + return cfgs, executeErr } // VDPANewDev adds new VDPA device @@ -385,6 +404,9 @@ func (h *Handle) VDPADelDev(name string) error { // VDPAGetDevList returns list of VDPA devices // Equivalent to: `vdpa dev show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) VDPAGetDevList() ([]*VDPADev, error) { return h.vdpaDevGet(nil) } @@ -404,6 +426,9 @@ func (h *Handle) VDPAGetDevByName(name string) (*VDPADev, error) { // VDPAGetDevConfigList returns list of VDPA devices configurations // Equivalent to: `vdpa dev config show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
func (h *Handle) VDPAGetDevConfigList() ([]*VDPADevConfig, error) { return h.vdpaDevConfigGet(nil) } @@ -441,6 +466,9 @@ func (h *Handle) VDPAGetDevVStats(name string, queueIndex uint32) (*VDPADevVStat // VDPAGetMGMTDevList returns list of mgmt devices // Equivalent to: `vdpa mgmtdev show` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) VDPAGetMGMTDevList() ([]*VDPAMGMTDev, error) { return h.vdpaMGMTDevGet(nil, nil) } diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go index d526739ceb..bf143a1b13 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "net" @@ -215,6 +216,9 @@ func (h *Handle) XfrmPolicyDel(policy *XfrmPolicy) error { // XfrmPolicyList gets a list of xfrm policies in the system. // Equivalent to: `ip xfrm policy show`. // The list can be filtered by ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func XfrmPolicyList(family int) ([]XfrmPolicy, error) { return pkgHandle.XfrmPolicyList(family) } @@ -222,15 +226,18 @@ func XfrmPolicyList(family int) ([]XfrmPolicy, error) { // XfrmPolicyList gets a list of xfrm policies in the system. // Equivalent to: `ip xfrm policy show`. // The list can be filtered by ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func (h *Handle) XfrmPolicyList(family int) ([]XfrmPolicy, error) { req := h.newNetlinkRequest(nl.XFRM_MSG_GETPOLICY, unix.NLM_F_DUMP) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []XfrmPolicy @@ -243,7 +250,7 @@ func (h *Handle) XfrmPolicyList(family int) ([]XfrmPolicy, error) { return nil, err } } - return res, nil + return res, executeErr } // XfrmPolicyGet gets a the policy described by the index or selector, if found. diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go index 554f2498c2..2f46146514 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "errors" "fmt" "net" "time" @@ -382,6 +383,9 @@ func (h *Handle) XfrmStateDel(state *XfrmState) error { // XfrmStateList gets a list of xfrm states in the system. // Equivalent to: `ip [-4|-6] xfrm state show`. // The list can be filtered by ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. func XfrmStateList(family int) ([]XfrmState, error) { return pkgHandle.XfrmStateList(family) } @@ -389,12 +393,15 @@ func XfrmStateList(family int) ([]XfrmState, error) { // XfrmStateList gets a list of xfrm states in the system. // Equivalent to: `ip xfrm state show`. // The list can be filtered by ip family. +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. 
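Note on the netlink hunks above (socket, XDP, VDPA, and XFRM dumps): the error contract changes so that an interrupted kernel dump now returns the partial result set together with ErrDumpInterrupted instead of discarding it. A minimal consumer-side sketch of how a caller might treat that as a soft failure; the choice of XfrmPolicyList and the logging are illustrative and not part of this change:

    package main

    import (
        "errors"
        "fmt"
        "log"

        "github.com/vishvananda/netlink"
    )

    func main() {
        // With this vendored netlink, an NLM_F_DUMP request that the kernel
        // restarts returns whatever was collected plus ErrDumpInterrupted,
        // rather than dropping everything.
        policies, err := netlink.XfrmPolicyList(netlink.FAMILY_ALL)
        switch {
        case err == nil:
            // complete result
        case errors.Is(err, netlink.ErrDumpInterrupted):
            log.Printf("xfrm policy dump interrupted; %d entries may be incomplete", len(policies))
        default:
            log.Fatalf("listing xfrm policies: %v", err)
        }
        for _, p := range policies {
            fmt.Println(p.Dir, p.Src, p.Dst)
        }
    }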
func (h *Handle) XfrmStateList(family int) ([]XfrmState, error) { req := h.newNetlinkRequest(nl.XFRM_MSG_GETSA, unix.NLM_F_DUMP) - msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWSA) - if err != nil { - return nil, err + msgs, executeErr := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWSA) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr } var res []XfrmState @@ -407,7 +414,7 @@ func (h *Handle) XfrmStateList(family int) ([]XfrmState, error) { return nil, err } } - return res, nil + return res, executeErr } // XfrmStateGet gets the xfrm state described by the ID, if found. diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index a5f904197f..d09555506f 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -25,6 +25,7 @@ linters: - revive - staticcheck - tenv + - testifylint - typecheck - unconvert - unused @@ -302,3 +303,9 @@ linters-settings: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - name: waitgroup-by-value disabled: false + testifylint: + enable-all: true + disable: + - float-compare + - go-require + - require-error diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index fb107426e7..4b361d0269 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,35 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862) +- Add `WithExportBufferSize` option to log batch processor.(#5877) + +### Changed + +- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778) +- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791) +- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791) +- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847) +- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864) +- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858) +- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874) + +### Deprecated + +- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854) + +### Fixed + +- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819) +- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to it's resource and scope. (#5803) +- Fix timer channel drain to avoid hanging on Go 1.23. (#5868) +- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. 
(#5827) +- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827) + ## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09 ### Added @@ -3081,7 +3110,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.30.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD +[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 [1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 [1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 5904bb7070..945a07d2b0 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu +CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 9158072535..bb33965574 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -631,11 +631,8 @@ should be canceled. ### Approvers -- [Chester Cheung](https://github.com/hanyuancheung), Tencent - ### Maintainers -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Damien Mathieu](https://github.com/dmathieu), Elastic - [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk @@ -644,11 +641,13 @@ should be canceled. 
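One practical consequence of the CHANGELOG hunk above: exemplars are now enabled by default in go.opentelemetry.io/otel/sdk/metric, with OTEL_METRICS_EXEMPLAR_FILTER=always_off as the documented opt-out. A small sketch of that opt-out; setting the variable in-process is only for illustration, and the exact point at which the SDK consults it is not spelled out here:

    package main

    import (
        "context"
        "log"
        "os"

        sdkmetric "go.opentelemetry.io/otel/sdk/metric"
    )

    func main() {
        // Per the changelog entry, exemplars default to on; "always_off"
        // disables them for the metric SDK's exemplar reservoirs.
        os.Setenv("OTEL_METRICS_EXEMPLAR_FILTER", "always_off")

        mp := sdkmetric.NewMeterProvider()
        defer func() {
            if err := mp.Shutdown(context.Background()); err != nil {
                log.Println(err)
            }
        }()
    }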
### Emeritus -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Chester Cheung](https://github.com/hanyuancheung), Tencent - [Evan Torrie](https://github.com/evantorrie), Yahoo +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep +- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index b04695b242..a1228a2124 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -54,9 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer PORTO = $(TOOLS)/porto $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto -GOJQ = $(TOOLS)/gojq -$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq - GOTMPL = $(TOOLS)/gotmpl $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl @@ -67,7 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 9a65707038..efec278905 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -89,8 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need to use the [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) -package. The included [examples](./example/) are a good way to see some -practical uses of this process. +package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples) +are a good way to see some practical uses of this process. ### Export diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 59992984d4..ffa9b61258 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -111,17 +111,6 @@ It is critical you make sure the version you push upstream is correct. Finally create a Release for the new `` on GitHub. The release body should include all the release notes from the Changelog for this release. -## Verify Examples - -After releasing verify that examples build outside of the repository. - -``` -./verify_examples.sh -``` - -The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. -This ensures they build with the published release, not the local copy. 
- ## Post-Release ### Contrib Repository diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index bff9c7fdbb..6cbefceadf 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -347,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct { func computeDistinctFixed(kvs []KeyValue) interface{} { switch len(kvs) { case 1: - ptr := new([1]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [1]KeyValue(kvs) case 2: - ptr := new([2]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [2]KeyValue(kvs) case 3: - ptr := new([3]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [3]KeyValue(kvs) case 4: - ptr := new([4]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [4]KeyValue(kvs) case 5: - ptr := new([5]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [5]KeyValue(kvs) case 6: - ptr := new([6]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [6]KeyValue(kvs) case 7: - ptr := new([7]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [7]KeyValue(kvs) case 8: - ptr := new([8]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [8]KeyValue(kvs) case 9: - ptr := new([9]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [9]KeyValue(kvs) case 10: - ptr := new([10]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [10]KeyValue(kvs) default: return nil } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index f2fc3929b1..e3db438a09 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -152,14 +152,17 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) return m.delegate.Int64Counter(name, options...) } - i := &siCounter{name: name, opts: options} cfg := metric.NewInt64CounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*siCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Counter), nil + } + i := &siCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -172,14 +175,17 @@ func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCou return m.delegate.Int64UpDownCounter(name, options...) } - i := &siUpDownCounter{name: name, opts: options} cfg := metric.NewInt64UpDownCounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*siUpDownCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64UpDownCounter), nil + } + i := &siUpDownCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -192,14 +198,17 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti return m.delegate.Int64Histogram(name, options...) } - i := &siHistogram{name: name, opts: options} cfg := metric.NewInt64HistogramConfig(options...) 
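Aside on the attribute/set.go hunk above: computeDistinctFixed now relies on Go's slice-to-array conversion (Go 1.20+) instead of allocating an array and copying into it. A tiny standalone illustration of that language feature, with made-up values unrelated to the vendored code:

    package main

    import "fmt"

    func main() {
        kvs := []string{"service.name", "web", "prod"}
        // Go 1.20+ slice-to-array conversion: copies the first len(arr)
        // elements and panics if the slice is shorter than the array —
        // the same pattern computeDistinctFixed uses for each fixed size.
        arr := [3]string(kvs)
        fmt.Println(arr) // [service.name web prod]
    }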
id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*siHistogram)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Histogram), nil + } + i := &siHistogram{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -212,14 +221,17 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met return m.delegate.Int64Gauge(name, options...) } - i := &siGauge{name: name, opts: options} cfg := metric.NewInt64GaugeConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*siGauge)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Gauge), nil + } + i := &siGauge{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -232,14 +244,17 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser return m.delegate.Int64ObservableCounter(name, options...) } - i := &aiCounter{name: name, opts: options} cfg := metric.NewInt64ObservableCounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*aiCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableCounter), nil + } + i := &aiCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -252,14 +267,17 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6 return m.delegate.Int64ObservableUpDownCounter(name, options...) } - i := &aiUpDownCounter{name: name, opts: options} cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*aiUpDownCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableUpDownCounter), nil + } + i := &aiUpDownCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -272,14 +290,17 @@ func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64Observa return m.delegate.Int64ObservableGauge(name, options...) } - i := &aiGauge{name: name, opts: options} cfg := metric.NewInt64ObservableGaugeConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*aiGauge)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableGauge), nil + } + i := &aiGauge{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -292,14 +313,17 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti return m.delegate.Float64Counter(name, options...) } - i := &sfCounter{name: name, opts: options} cfg := metric.NewFloat64CounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*sfCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Counter), nil + } + i := &sfCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -312,14 +336,17 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow return m.delegate.Float64UpDownCounter(name, options...) } - i := &sfUpDownCounter{name: name, opts: options} cfg := metric.NewFloat64UpDownCounterConfig(options...) 
id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*sfUpDownCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64UpDownCounter), nil + } + i := &sfUpDownCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -332,14 +359,17 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram return m.delegate.Float64Histogram(name, options...) } - i := &sfHistogram{name: name, opts: options} cfg := metric.NewFloat64HistogramConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*sfHistogram)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Histogram), nil + } + i := &sfHistogram{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -352,14 +382,17 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) return m.delegate.Float64Gauge(name, options...) } - i := &sfGauge{name: name, opts: options} cfg := metric.NewFloat64GaugeConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*sfGauge)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Gauge), nil + } + i := &sfGauge{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -372,14 +405,17 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O return m.delegate.Float64ObservableCounter(name, options...) } - i := &afCounter{name: name, opts: options} cfg := metric.NewFloat64ObservableCounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*afCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableCounter), nil + } + i := &afCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -392,14 +428,17 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl return m.delegate.Float64ObservableUpDownCounter(name, options...) } - i := &afUpDownCounter{name: name, opts: options} cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*afUpDownCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableUpDownCounter), nil + } + i := &afUpDownCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -412,14 +451,17 @@ func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64Obs return m.delegate.Float64ObservableGauge(name, options...) } - i := &afGauge{name: name, opts: options} cfg := metric.NewFloat64ObservableGaugeConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*afGauge)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableGauge), nil + } + i := &afGauge{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -487,6 +529,7 @@ func (c *registration) setDelegate(m metric.Meter) { reg, err := m.RegisterCallback(c.function, insts...) 
if err != nil { GetErrorHandler().Handle(err) + return } c.unreg = reg.Unregister diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index 9b1da2c02b..b2fe3e41d3 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -20,7 +20,8 @@ func RawToBool(r uint64) bool { } func Int64ToRaw(i int64) uint64 { - return uint64(i) + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec } func RawToInt64(r uint64) int64 { diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index ea52e40233..a535782e1d 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -351,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption { // // cp := make([]attribute.KeyValue, len(attributes)) // copy(cp, attributes) -// WithAttributes(attribute.NewSet(cp...)) +// WithAttributeSet(attribute.NewSet(cp...)) // // [attribute.NewSet] may modify the passed attributes so this will make a copy // of attributes before creating a set in order to ensure this function is diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index 4d36b98cf4..0a29a2f13d 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -23,6 +23,10 @@ { "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], "groupName": "googleapis" + }, + { + "matchPackageNames": ["golang.org/x/**"], + "groupName": "golang.org/x" } ] } diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh deleted file mode 100644 index e57bf57fce..0000000000 --- a/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -cd $(dirname $0) -TOOLS_DIR=$(pwd)/.tools - -if [ -z "${GOPATH}" ] ; then - printf "GOPATH is not defined.\n" - exit -1 -fi - -if [ ! -d "${GOPATH}" ] ; then - printf "GOPATH ${GOPATH} is invalid \n" - exit -1 -fi - -# Pre-requisites -if ! git diff --quiet; then \ - git status - printf "\n\nError: working tree is not clean\n" - exit -1 -fi - -if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then - printf "$(git log -1)" - printf "\n\nError: HEAD is not pointing to a tagged version" -fi - -make ${TOOLS_DIR}/gojq - -DIR_TMP="${GOPATH}/src/oteltmp/" -rm -rf $DIR_TMP -mkdir -p $DIR_TMP - -printf "Copy examples to ${DIR_TMP}\n" -cp -a ./example ${DIR_TMP} - -# Update go.mod files -printf "Update go.mod: rename module and remove replace\n" - -PACKAGE_DIRS=$(find . 
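The internal/global/meter.go hunks above change the delegating (pre-SetMeterProvider) meter to look up an existing instrument by identity before creating a new one, and to return early from setDelegate when RegisterCallback fails. A rough sketch of the user-visible pattern this protects; instrument and scope names are made up for the example:

    package main

    import (
        "context"

        "go.opentelemetry.io/otel"
        sdkmetric "go.opentelemetry.io/otel/sdk/metric"
    )

    func main() {
        // Instruments requested before a real provider is installed go through
        // the global delegating meter; with the fix above, asking for the same
        // instrument twice yields the cached one instead of a duplicate entry.
        m := otel.Meter("example/global")
        c1, _ := m.Int64Counter("requests")
        c2, _ := m.Int64Counter("requests") // same identity: cached instrument returned

        // Installing the real provider then delegates the cached instruments;
        // the delegation/panic issue referenced as #5827 above targeted this path.
        otel.SetMeterProvider(sdkmetric.NewMeterProvider())

        c1.Add(context.Background(), 1)
        c2.Add(context.Background(), 1)
    }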
-mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) - -for dir in $PACKAGE_DIRS; do - printf " Update go.mod for $dir\n" - (cd "${DIR_TMP}/${dir}" && \ - # replaces is ("mod1" "mod2" …) - replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ - # strip double quotes - replaces=("${replaces[@]%\"}") && \ - replaces=("${replaces[@]#\"}") && \ - # make an array (-dropreplace=mod1 -dropreplace=mod2 …) - dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ - go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ - go mod tidy) -done -printf "Update done:\n\n" - -# Build directories that contain main package. These directories are different than -# directories that contain go.mod files. -printf "Build examples:\n" -EXAMPLES=$(./get_main_pkgs.sh ./example) -for ex in $EXAMPLES; do - printf " Build $ex in ${DIR_TMP}/${ex}\n" - (cd "${DIR_TMP}/${ex}" && \ - go build .) -done - -# Cleanup -printf "Remove copied files.\n" -rm -rf $DIR_TMP diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 78b40f3ed2..6d3c7b1f40 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.30.0" + return "1.31.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 0c32f4fc46..cdebdb5eb7 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.30.0 + version: v1.31.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -29,12 +29,12 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.52.0 + version: v0.53.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.6.0 + version: v0.7.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -42,7 +42,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.9 + version: v0.0.10 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go new file mode 100644 index 0000000000..de58dfb8dc --- /dev/null +++ b/vendor/golang.org/x/net/http2/config.go @@ -0,0 +1,122 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "math" + "net/http" + "time" +) + +// http2Config is a package-internal version of net/http.HTTP2Config. +// +// http.HTTP2Config was added in Go 1.24. +// When running with a version of net/http that includes HTTP2Config, +// we merge the configuration with the fields in Transport or Server +// to produce an http2Config. +// +// Zero valued fields in http2Config are interpreted as in the +// net/http.HTTPConfig documentation. +// +// Precedence order for reconciling configurations is: +// +// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero. 
+// - Otherwise use the http2.{Server.Transport} value. +// - If the resulting value is zero or out of range, use a default. +type http2Config struct { + MaxConcurrentStreams uint32 + MaxDecoderHeaderTableSize uint32 + MaxEncoderHeaderTableSize uint32 + MaxReadFrameSize uint32 + MaxUploadBufferPerConnection int32 + MaxUploadBufferPerStream int32 + SendPingTimeout time.Duration + PingTimeout time.Duration + WriteByteTimeout time.Duration + PermitProhibitedCipherSuites bool + CountError func(errType string) +} + +// configFromServer merges configuration settings from +// net/http.Server.HTTP2Config and http2.Server. +func configFromServer(h1 *http.Server, h2 *Server) http2Config { + conf := http2Config{ + MaxConcurrentStreams: h2.MaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection, + MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, + CountError: h2.CountError, + } + fillNetHTTPServerConfig(&conf, h1) + setConfigDefaults(&conf, true) + return conf +} + +// configFromServer merges configuration settings from h2 and h2.t1.HTTP2 +// (the net/http Transport). +func configFromTransport(h2 *Transport) http2Config { + conf := http2Config{ + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + } + + // Unlike most config fields, where out-of-range values revert to the default, + // Transport.MaxReadFrameSize clips. + if conf.MaxReadFrameSize < minMaxFrameSize { + conf.MaxReadFrameSize = minMaxFrameSize + } else if conf.MaxReadFrameSize > maxFrameSize { + conf.MaxReadFrameSize = maxFrameSize + } + + if h2.t1 != nil { + fillNetHTTPTransportConfig(&conf, h2.t1) + } + setConfigDefaults(&conf, false) + return conf +} + +func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) { + if *v < minval || *v > maxval { + *v = defval + } +} + +func setConfigDefaults(conf *http2Config, server bool) { + setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams) + setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + if server { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow) + } + if server { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow) + } + setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize) + setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second) +} + +// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header +// to an HTTP/2 MAX_HEADER_LIST_SIZE value. +func adjustHTTP1MaxHeaderSize(n int64) int64 { + // http2's count is in a slightly different unit and includes 32 bytes per pair. 
+ // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return n + typicalHeaders*perFieldOverhead +} diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go new file mode 100644 index 0000000000..e3784123c8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.24 + +package http2 + +import "net/http" + +// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { + fillNetHTTPConfig(conf, srv.HTTP2) +} + +// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { + fillNetHTTPConfig(conf, tr.HTTP2) +} + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go new file mode 100644 index 0000000000..060fd6c64c --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_pre_go124.go @@ -0,0 +1,16 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.24 + +package http2 + +import "net/http" + +// Pre-Go 1.24 fallback. +// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. + +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} + +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 003e649f30..7688c356b7 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -19,8 +19,9 @@ import ( "bufio" "context" "crypto/tls" + "errors" "fmt" - "io" + "net" "net/http" "os" "sort" @@ -237,13 +238,19 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. 
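The new config.go / config_go124.go files above merge HTTP/2 settings from three sources, in order of precedence: net/http's HTTP2Config (Go 1.24+), the golang.org/x/net/http2 Server or Transport fields, and built-in defaults. A hedged sketch of the Go 1.24 side under that precedence rule; field names come from the fillNetHTTPConfig hunk above, and the certificate paths are placeholders:

    //go:build go1.24

    package main

    import (
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        h1 := &http.Server{
            Addr: ":8443",
            // Go 1.24+: net/http's own HTTP/2 knobs. Non-zero fields here take
            // precedence over the http2.Server values below (configFromServer).
            HTTP2: &http.HTTP2Config{MaxConcurrentStreams: 500},
        }
        h2 := &http2.Server{
            MaxConcurrentStreams: 250, // overridden by h1.HTTP2 above, so 500 wins
        }
        if err := http2.ConfigureServer(h1, h2); err != nil {
            log.Fatal(err)
        }
        log.Fatal(h1.ListenAndServeTLS("cert.pem", "key.pem")) // placeholder cert paths
    }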
type bufferedWriter struct { - _ incomparable - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered + _ incomparable + group synctestGroupInterface // immutable + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} +func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { + return &bufferedWriter{ + group: group, + conn: conn, + byteTimeout: timeout, + } } // bufWriterPoolBufferSize is the size of bufio.Writer's @@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int { func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) + bw.Reset((*bufferedWriterTimeoutWriter)(w)) w.bw = bw } return w.bw.Write(p) @@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error { return err } +type bufferedWriterTimeoutWriter bufferedWriter + +func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { + return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) +} + +// writeWithByteTimeout writes to conn. +// If more than timeout passes without any bytes being written to the connection, +// the write fails. +func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { + if timeout <= 0 { + return conn.Write(p) + } + for { + var now time.Time + if group == nil { + now = time.Now() + } else { + now = group.Now() + } + conn.SetWriteDeadline(now.Add(timeout)) + nn, err := conn.Write(p[n:]) + n += nn + if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { + // Either we finished the write, made no progress, or hit the deadline. + // Whichever it is, we're done now. + conn.SetWriteDeadline(time.Time{}) + return n, err + } + } +} + func mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 6c349f3ec6..617b4a4762 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -29,6 +29,7 @@ import ( "bufio" "bytes" "context" + "crypto/rand" "crypto/tls" "errors" "fmt" @@ -52,10 +53,14 @@ import ( ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + + // maxQueuedControlFrames is the maximum number of control frames like + // SETTINGS, PING and RST_STREAM that will be queued for writing before + // the connection is closed to prevent memory exhaustion attacks. maxQueuedControlFrames = 10000 ) @@ -127,6 +132,22 @@ type Server struct { // If zero or negative, there is no timeout. IdleTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. + // If zero, no health check is performed. 
+ ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to a ping is not received. + // If zero, a default of 15 seconds is used. + PingTimeout time.Duration + + // WriteByteTimeout is the timeout after which a connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. + // If zero or negative, there is no timeout. + WriteByteTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. @@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer { return timeTimer{time.AfterFunc(d, f)} } -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection >= initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -func (s *Server) maxDecoderHeaderTableSize() uint32 { - if v := s.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (s *Server) maxEncoderHeaderTableSize() uint32 { - if v := s.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. 
- return maxQueuedControlFrames -} - type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} @@ -440,13 +410,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() + http1srv := opts.baseConfig() + conf := configFromServer(http1srv, s) sc := &serverConn{ srv: s, - hs: opts.baseConfig(), + hs: http1srv, conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), + bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -456,9 +428,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), + advMaxStreams: conf.MaxConcurrentStreams, initialStreamSendWindowSize: initialWindowSize, + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxFrameSize: initialMaxFrameSize, + pingTimeout: conf.PingTimeout, + countErrorFunc: conf.CountError, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, @@ -491,15 +466,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon sc.flow.add(initialWindowSize) sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) fr := NewFramer(sc.bw, c) - if s.CountError != nil { - fr.countError = s.CountError + if conf.CountError != nil { + fr.countError = conf.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) + fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) sc.framer = fr if tc, ok := c.(connectionStater); ok { @@ -532,7 +507,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon // So for now, do nothing here again. } - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." 
@@ -569,7 +544,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon opts.UpgradeRequest = nil } - sc.serve() + sc.serve(conf) } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { @@ -609,6 +584,7 @@ type serverConn struct { tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler + countErrorFunc func(errType string) // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() @@ -628,6 +604,7 @@ type serverConn struct { streams map[uint32]*stream unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 + initialStreamRecvWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case @@ -638,9 +615,14 @@ type serverConn struct { inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write + pingSent bool + sentPingData [8]byte goAwayCode ErrCode shutdownTimer timer // nil until used idleTimer timer // nil if unused + readIdleTimeout time.Duration + pingTimeout time.Duration + readIdleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -655,11 +637,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 { if n <= 0 { n = http.DefaultMaxHeaderBytes } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) + return uint32(adjustHTTP1MaxHeaderSize(int64(n))) } func (sc *serverConn) curOpenStreams() uint32 { @@ -923,7 +901,7 @@ func (sc *serverConn) notePanic() { } } -func (sc *serverConn) serve() { +func (sc *serverConn) serve(conf http2Config) { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() @@ -937,18 +915,18 @@ func (sc *serverConn) serve() { sc.writeFrame(FrameWriteRequest{ write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxFrameSize, conf.MaxReadFrameSize}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, }, }) sc.unackedSettings++ // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. 
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } @@ -968,11 +946,18 @@ func (sc *serverConn) serve() { defer sc.idleTimer.Stop() } + if conf.SendPingTimeout > 0 { + sc.readIdleTimeout = conf.SendPingTimeout + sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + defer sc.readIdleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() + lastFrameTime := sc.srv.now() loopNum := 0 for { loopNum++ @@ -986,6 +971,7 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + lastFrameTime = sc.srv.now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1017,6 +1003,8 @@ func (sc *serverConn) serve() { case idleTimerMsg: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) + case readIdleTimerMsg: + sc.handlePingTimer(lastFrameTime) case shutdownTimerMsg: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return @@ -1039,7 +1027,7 @@ func (sc *serverConn) serve() { // If the peer is causing us to generate a lot of control frames, // but not reading them from us, assume they are trying to make us // run out of memory. - if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + if sc.queuedControlFrames > maxQueuedControlFrames { sc.vlogf("http2: too many control frames in send queue, closing connection") return } @@ -1055,12 +1043,39 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { + if sc.pingSent { + sc.vlogf("timeout waiting for PING response") + sc.conn.Close() + return + } + + pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) + now := sc.srv.now() + if pingAt.After(now) { + // We received frames since arming the ping timer. + // Reset it for the next possible timeout. + sc.readIdleTimer.Reset(pingAt.Sub(now)) + return + } + + sc.pingSent = true + // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does + // is we send a PING frame containing 0s. + _, _ = rand.Read(sc.sentPingData[:]) + sc.writeFrame(FrameWriteRequest{ + write: &writePing{data: sc.sentPingData}, + }) + sc.readIdleTimer.Reset(sc.pingTimeout) +} + type serverMessage int // Message values sent to serveMsgCh. 
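The readIdleTimer / handlePingTimer machinery above gives the HTTP/2 server the same keepalive scheme the client already had: after ReadIdleTimeout of frame inactivity it sends a PING with a random payload, and it closes the connection if no matching ACK arrives within PingTimeout. A sketch of enabling it on an existing net/http server; the timeout values and certificate paths are arbitrary examples:

    package main

    import (
        "log"
        "net/http"
        "time"

        "golang.org/x/net/http2"
    )

    func main() {
        h1 := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
        h2 := &http2.Server{
            ReadIdleTimeout:  30 * time.Second, // send a PING after 30s without any frame
            PingTimeout:      15 * time.Second, // close if the PING ACK never arrives
            WriteByteTimeout: 10 * time.Second, // close if a write makes no progress
        }
        if err := http2.ConfigureServer(h1, h2); err != nil {
            log.Fatal(err)
        }
        log.Fatal(h1.ListenAndServeTLS("cert.pem", "key.pem")) // placeholder cert paths
    }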
var ( settingsTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage) + readIdleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) handlerDoneMsg = new(serverMessage) @@ -1068,6 +1083,7 @@ var ( func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) sendServeMsg(msg interface{}) { @@ -1320,6 +1336,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.writingFrame = false sc.writingFrameAsync = false + if res.err != nil { + sc.conn.Close() + } + wr := res.wr if writeEndsStream(wr.write) { @@ -1594,6 +1614,11 @@ func (sc *serverConn) processFrame(f Frame) error { func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { + if sc.pingSent && sc.sentPingData == f.Data { + // This is a response to a PING we sent. + sc.pingSent = false + sc.readIdleTimer.Reset(sc.readIdleTimeout) + } // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil @@ -2160,7 +2185,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.init(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -3301,7 +3326,7 @@ func (sc *serverConn) countError(name string, err error) error { if sc == nil || sc.srv == nil { return err } - f := sc.srv.CountError + f := sc.countErrorFunc if f == nil { return err } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 61f511f97a..0c5f64aa8b 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "os" "sort" "strconv" "strings" @@ -227,40 +226,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co } func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { + n := int64(t.MaxHeaderListSize) + if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 { + n = t.t1.MaxResponseHeaderBytes + if n > 0 { + n = adjustHTTP1MaxHeaderSize(n) + } + } + if n <= 0 { return 10 << 20 } - if t.MaxHeaderListSize == 0xffffffff { + if n >= 0xffffffff { return 0 } - return t.MaxHeaderListSize -} - -func (t *Transport) maxFrameReadSize() uint32 { - if t.MaxReadFrameSize == 0 { - return 0 // use the default provided by the peer - } - if t.MaxReadFrameSize < minMaxFrameSize { - return minMaxFrameSize - } - if t.MaxReadFrameSize > maxFrameSize { - return maxFrameSize - } - return t.MaxReadFrameSize + return uint32(n) } func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -func (t *Transport) pingTimeout() time.Duration { - if t.PingTimeout == 0 { - return 15 * time.Second - } - return t.PingTimeout - -} - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. 
// @@ -370,11 +355,14 @@ type ClientConn struct { lastActive time.Time lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - peerMaxHeaderTableSize uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 + initialStreamRecvWindowSize int32 + readIdleTimeout time.Duration + pingTimeout time.Duration // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -499,6 +487,7 @@ func (cs *clientStream) closeReqBodyLocked() { } type stickyErrWriter struct { + group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -508,22 +497,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - for { - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) - } - nn, err := sew.conn.Write(p[n:]) - n += nn - if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { - // Keep extending the deadline so long as we're making progress. - continue - } - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Time{}) - } - *sew.err = err - return n, err - } + n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + *sew.err = err + return n, err } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -758,44 +734,36 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } -func (t *Transport) maxDecoderHeaderTableSize() uint32 { - if v := t.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (t *Transport) maxEncoderHeaderTableSize() uint32 { - if v := t.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + conf := configFromTransport(t) cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - reqHeaderMu: make(chan struct{}, 1), - } + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + readIdleTimeout: conf.SendPingTimeout, + pingTimeout: conf.PingTimeout, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), + } + var group synctestGroupInterface if t.transportTestHooks != nil { t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn + group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -807,24 +775,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ + group: group, conn: c, - timeout: t.WriteByteTimeout, + timeout: conf.WriteByteTimeout, err: &cc.werr, }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) - if t.maxFrameReadSize() != 0 { - cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) - } + cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.CountError != nil { cc.fr.countError = t.CountError } - maxHeaderTableSize := t.maxDecoderHeaderTableSize() + maxHeaderTableSize := conf.MaxDecoderHeaderTableSize cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.peerMaxHeaderTableSize = initialHeaderTableSize if cs, ok := c.(connectionStater); ok { @@ -834,11 +801,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxFrameReadSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, } + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize}) if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } @@ -848,8 +813,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.init(transportDefaultConnFlow + initialWindowSize) + cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) + cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() @@ -867,7 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro } func (cc *ClientConn) healthCheck() { - pingTimeout := cc.t.pingTimeout() + pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. 
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) @@ -2199,7 +2164,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.init(transportDefaultStreamFlow) + cs.inflow.init(cc.initialStreamRecvWindowSize) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2345,7 +2310,7 @@ func (cc *ClientConn) countReadFrameError(err error) { func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false - readIdleTimeout := cc.t.ReadIdleTimeout + readIdleTimeout := cc.readIdleTimeout var t timer if readIdleTimeout != 0 { t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 33f61398a1..6ff6bee7e9 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error { func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } +type writePing struct { + data [8]byte +} + +func (w writePing) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(false, w.data) +} + +func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index 923a5780ec..ac76165ceb 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -8,7 +8,7 @@ // This package currently lacks some features found in an alternative // and more actively maintained WebSocket package: // -// https://pkg.go.dev/nhooyr.io/websocket +// https://pkg.go.dev/github.com/coder/websocket package websocket // import "golang.org/x/net/websocket" import ( diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb332174..109997d77c 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. raw interface{} diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 7d3c060e12..6e08a76a71 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these into a common file for each OS. The merge is performed in the following steps: -1. Construct the set of common code that is idential in all architecture-specific files. +1. Construct the set of common code that is identical in all architecture-specific files. 2. Write this common code to the merged file. 3. Remove the common code from all architecture-specific files. 
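The x/oauth2 bump adds the wire-format ExpiresIn field to Token and, as its comment notes, leaves deriving Expiry to the application. A minimal sketch of doing that conversion relative to the time the token response was received (the access-token value is a placeholder):

    package main

    import (
        "fmt"
        "time"

        "golang.org/x/oauth2"
    )

    func main() {
        received := time.Now() // when the token response was read off the wire
        tok := &oauth2.Token{
            AccessToken: "placeholder-access-token",
            ExpiresIn:   3600, // "expires_in" as sent by the server, in seconds
        }
        // ExpiresIn is relative to roughly "now"; populate Expiry explicitly.
        if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
            tok.Expiry = received.Add(time.Duration(tok.ExpiresIn) * time.Second)
        }
        fmt.Println("token valid:", tok.Valid())
    }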
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index e14b766a32..ac54ecaba0 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -656,7 +656,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort ) @@ -666,7 +666,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 67ce6cef2d..6f15ba1eaf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, var status _C_int var r Pid_t err = ERESTART - // AIX wait4 may return with ERESTART errno, while the processus is still + // AIX wait4 may return with ERESTART errno, while the process is still // active. for err == ERESTART { r, err = wait4(Pid_t(pid), &status, options, rusage) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 3f1d3d4cb2..f08abd434f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { return &value, err } +// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPVegasInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctp" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr" +// algorithm. 
+// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPBBRInfo)(unsafe.Pointer(&value[0])) + return out, err +} + // GetsockoptString returns the string value of the socket option opt for the // socket associated with fd at the given socket level. func GetsockoptString(fd, level, opt int) (string, error) { @@ -1959,7 +2001,26 @@ func Getpgrp() (pid int) { //sysnb Getpid() (pid int) //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) -//sys Getrandom(buf []byte, flags int) (n int, err error) + +func Getrandom(buf []byte, flags int) (n int, err error) { + vdsoRet, supported := vgetrandom(buf, uint32(flags)) + if supported { + if vdsoRet < 0 { + return 0, errnoErr(syscall.Errno(-vdsoRet)) + } + return vdsoRet, nil + } + var p *byte + if len(buf) > 0 { + p = &buf[0] + } + r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags)) + if e != 0 { + return 0, errnoErr(e) + } + return int(r), nil +} + //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) //sysnb Gettid() (tid int) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index cf2ee6c75e..745e5c7e6c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 3d0e98451f..dd2262a407 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6f5a288944..8cf3670bda 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error } return riscvHWProbe(pairs, setSize, set, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go new file mode 100644 index 0000000000..07ac8e09d1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
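The new GetsockoptTCPCC*Info helpers are meant to be paired with a TCP_CONGESTION lookup, as their doc comments suggest. A hedged, Linux-only sketch that assumes the per-algorithm structs are read with the TCP_CC_INFO socket option; in real code the fd would be a connected TCP socket rather than a freshly created one:

    //go:build linux

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/sys/unix"
    )

    func main() {
        fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
        if err != nil {
            log.Fatal(err)
        }
        defer unix.Close(fd)

        algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("congestion control:", algo)

        // Pick the accessor matching the algorithm reported above; each helper
        // reads the option into a fixed-size buffer and reinterprets it as its
        // algorithm's struct.
        switch algo {
        case "bbr":
            info, err := unix.GetsockoptTCPCCBBRInfo(fd, unix.IPPROTO_TCP, unix.TCP_CC_INFO)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Printf("bbr min_rtt=%dus\n", info.Min_rtt)
        case "vegas":
            info, err := unix.GetsockoptTCPCCVegasInfo(fd, unix.IPPROTO_TCP, unix.TCP_CC_INFO)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println("vegas enabled:", info.Enabled != 0)
        }
    }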
+ +//go:build linux && go1.24 + +package unix + +import _ "unsafe" + +//go:linkname vgetrandom runtime.vgetrandom +//go:noescape +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go new file mode 100644 index 0000000000..297e97bce9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux || !go1.24 + +package unix + +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) { + return -1, false +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 01a70b2463..de3b462489 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -495,6 +495,7 @@ const ( BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 + BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 BPF_F_XDP_DEV_BOUND_ONLY = 0x40 @@ -1922,6 +1923,7 @@ const ( MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 + MNT_ID_REQ_SIZE_VER1 = 0x20 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2187,7 +2189,7 @@ const ( NFT_REG_SIZE = 0x10 NFT_REJECT_ICMPX_MAX = 0x3 NFT_RT_MAX = 0x4 - NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SECMARK_CTX_MAXLEN = 0x1000 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 NFT_TABLE_F_MASK = 0x7 @@ -2356,9 +2358,11 @@ const ( PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L2_MHB = 0x5 PERF_MEM_LVLNUM_L3 = 0x3 PERF_MEM_LVLNUM_L4 = 0x4 PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_MSC = 0x6 PERF_MEM_LVLNUM_NA = 0xf PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd @@ -2431,6 +2435,7 @@ const ( PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROCFS_IOCTL_MAGIC = 'f' PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 @@ -2933,11 +2938,12 @@ const ( RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 + RWF_ATOMIC = 0x40 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x3f + RWF_SUPPORTED = 0x7f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3210,6 +3216,7 @@ const ( STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 + STATX_ATTR_WRITE_ATOMIC = 0x400000 STATX_BASIC_STATS = 0x7ff STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 @@ -3226,6 +3233,7 @@ const ( STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 + STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 @@ -3624,6 +3632,7 @@ const ( XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_METADATA_LEN = 0x4 XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 684a5168da..8aa6d77c01 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -153,9 +153,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 
NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 61d74b592d..da428f4253 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -153,9 +153,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a28c9e3e89..bf45bfec78 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index ab5d1fe8ea..71c67162b7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -154,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index c523090e7c..9476628fa0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -154,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 01e6ea7804..b9e85f3cf0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 7aa610b1e7..a48b68a764 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 92af771b44..ea00e8522a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index b27ef5e6f1..91c6468717 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 237a2cefb3..8cbf38d639 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 4a5c555a36..a2df734191 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index a02fb49a5f..2479137923 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 
0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index e26a7c61b2..d265f146ee 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index c48f7c2103..3f2d644396 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index ad4b9aace7..5d8b727a1c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -155,9 +155,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1bc1a5adb2..af30da5578 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -971,23 +971,6 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index d3e38f681a..f485dbf456 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,6 +341,7 @@ const ( SYS_STATX 
= 332 SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_URETPROBE = 335 SYS_PIDFD_SEND_SIGNAL = 424 SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 6c778c2327..1893e2fe88 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -85,7 +85,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 37281cf51a..16a4017da0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -84,6 +84,8 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 + SYS_NEWFSTATAT = 79 + SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 SYS_FDATASYNC = 83 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 9889f6a559..a5459e766f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -84,7 +84,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 9f2550dc31..3a69e45496 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -87,31 +87,35 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - Dio_mem_align uint32 - Dio_offset_align uint32 - Subvol uint64 - _ [11]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + Subvol uint64 + Atomic_write_unit_min uint32 + Atomic_write_unit_max uint32 + Atomic_write_segments_max uint32 + _ [1]uint32 + _ [9]uint64 } type Fsid struct { @@ -516,6 +520,29 @@ type TCPInfo struct { Total_rto_time uint32 } +type TCPVegasInfo struct { + Enabled uint32 + Rttcnt uint32 + Rtt uint32 + Minrtt uint32 +} + +type TCPDCTCPInfo struct { + Enabled uint16 + Ce_state uint16 + Alpha uint32 + Ab_ecn uint32 + Ab_tot uint32 +} + +type TCPBBRInfo struct { + Bw_lo uint32 + Bw_hi uint32 + Min_rtt uint32 + Pacing_gain uint32 + Cwnd_gain uint32 +} + type CanFilter struct { Id uint32 Mask uint32 @@ -557,6 +584,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0xf8 + SizeofTCPCCInfo = 0x14 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -3766,7 +3794,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 
0x2b + ETHTOOL_MSG_USER_MAX = 0x2c ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3806,7 +3834,7 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_MSG_KERNEL_MAX = 0x2c ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3951,7 +3979,7 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x1c + ETHTOOL_A_COALESCE_MAX = 0x1e ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -4609,7 +4637,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14a + NL80211_ATTR_MAX = 0x14c NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5213,7 +5241,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x20 + NL80211_FREQUENCY_ATTR_MAX = 0x21 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 115341fba6..4e613cf633 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) { return d, nil } -// MustLoadDLL is like LoadDLL but panics if load operation failes. +// MustLoadDLL is like LoadDLL but panics if load operation fails. func MustLoadDLL(name string) *DLL { d, e := LoadDLL(name) if e != nil { diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 8f6c7f493f..93a798ab63 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } t, tokens := lim.advance(t) diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 0000000000..2a7cf70da6 --- /dev/null +++ b/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/txtar/archive.go b/vendor/golang.org/x/tools/txtar/archive.go new file mode 100644 index 0000000000..fd95f1e64a --- /dev/null +++ b/vendor/golang.org/x/tools/txtar/archive.go @@ -0,0 +1,140 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package txtar implements a trivial text-based file archive format. +// +// The goals for the format are: +// +// - be trivial enough to create and edit by hand. +// - be able to store trees of text files describing go command test cases. +// - diff nicely in git history and code reviews. +// +// Non-goals include being a completely general archive format, +// storing binary data, storing file modes, storing special files like +// symbolic links, and so on. +// +// # Txtar format +// +// A txtar archive is zero or more comment lines and then a sequence of file entries. 
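The newly vendored txtar package describes a simple text archive format: optional comment lines followed by file sections introduced by marker lines, as the package comment spells out. A short sketch of parsing and re-serializing such an archive with the package's Parse and Format functions (the file names and contents are made up for illustration):

    package main

    import (
        "fmt"

        "golang.org/x/tools/txtar"
    )

    func main() {
        src := []byte("comment describing the archive\n" +
            "-- hello.txt --\n" +
            "hello, txtar\n" +
            "-- dir/config.yaml --\n" +
            "enabled: true\n")

        a := txtar.Parse(src)
        fmt.Printf("comment: %q\n", a.Comment)
        for _, f := range a.Files {
            fmt.Printf("%s: %q\n", f.Name, f.Data)
        }

        // Format re-serializes the archive; for well-formed input this
        // round-trips the original bytes.
        fmt.Print(string(txtar.Format(a)))
    }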
+// Each file entry begins with a file marker line of the form "-- FILENAME --" +// and is followed by zero or more file content lines making up the file data. +// The comment or file content ends at the next file marker line. +// The file marker line must begin with the three-byte sequence "-- " +// and end with the three-byte sequence " --", but the enclosed +// file name can be surrounding by additional white space, +// all of which is stripped. +// +// If the txtar file is missing a trailing newline on the final line, +// parsers should consider a final newline to be present anyway. +// +// There are no possible syntax errors in a txtar archive. +package txtar + +import ( + "bytes" + "fmt" + "os" + "strings" +) + +// An Archive is a collection of files. +type Archive struct { + Comment []byte + Files []File +} + +// A File is a single file in an archive. +type File struct { + Name string // name of file ("foo/bar.txt") + Data []byte // text content of file +} + +// Format returns the serialized form of an Archive. +// It is assumed that the Archive data structure is well-formed: +// a.Comment and all a.File[i].Data contain no file marker lines, +// and all a.File[i].Name is non-empty. +func Format(a *Archive) []byte { + var buf bytes.Buffer + buf.Write(fixNL(a.Comment)) + for _, f := range a.Files { + fmt.Fprintf(&buf, "-- %s --\n", f.Name) + buf.Write(fixNL(f.Data)) + } + return buf.Bytes() +} + +// ParseFile parses the named file as an archive. +func ParseFile(file string) (*Archive, error) { + data, err := os.ReadFile(file) + if err != nil { + return nil, err + } + return Parse(data), nil +} + +// Parse parses the serialized form of an Archive. +// The returned Archive holds slices of data. +func Parse(data []byte) *Archive { + a := new(Archive) + var name string + a.Comment, name, data = findFileMarker(data) + for name != "" { + f := File{name, nil} + f.Data, name, data = findFileMarker(data) + a.Files = append(a.Files, f) + } + return a +} + +var ( + newlineMarker = []byte("\n-- ") + marker = []byte("-- ") + markerEnd = []byte(" --") +) + +// findFileMarker finds the next file marker in data, +// extracts the file name, and returns the data before the marker, +// the file name, and the data after the marker. +// If there is no next marker, findFileMarker returns before = fixNL(data), name = "", after = nil. +func findFileMarker(data []byte) (before []byte, name string, after []byte) { + var i int + for { + if name, after = isMarker(data[i:]); name != "" { + return data[:i], name, after + } + j := bytes.Index(data[i:], newlineMarker) + if j < 0 { + return fixNL(data), "", nil + } + i += j + 1 // positioned at start of new possible marker + } +} + +// isMarker checks whether data begins with a file marker line. +// If so, it returns the name from the line and the data after the line. +// Otherwise it returns name == "" with an unspecified after. +func isMarker(data []byte) (name string, after []byte) { + if !bytes.HasPrefix(data, marker) { + return "", nil + } + if i := bytes.IndexByte(data, '\n'); i >= 0 { + data, after = data[:i], data[i+1:] + } + if !(bytes.HasSuffix(data, markerEnd) && len(data) >= len(marker)+len(markerEnd)) { + return "", nil + } + return strings.TrimSpace(string(data[len(marker) : len(data)-len(markerEnd)])), after +} + +// If data is empty or ends in \n, fixNL returns data. +// Otherwise fixNL returns a new slice consisting of data with a final \n added. 
+func fixNL(data []byte) []byte { + if len(data) == 0 || data[len(data)-1] == '\n' { + return data + } + d := make([]byte, len(data)+1) + copy(d, data) + d[len(data)] = '\n' + return d +} diff --git a/vendor/golang.org/x/tools/txtar/fs.go b/vendor/golang.org/x/tools/txtar/fs.go new file mode 100644 index 0000000000..fc8df12c18 --- /dev/null +++ b/vendor/golang.org/x/tools/txtar/fs.go @@ -0,0 +1,257 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package txtar + +import ( + "errors" + "fmt" + "io" + "io/fs" + "path" + "slices" + "time" +) + +// FS returns the file system form of an Archive. +// It returns an error if any of the file names in the archive +// are not valid file system names. +// The archive must not be modified while the FS is in use. +// +// If the file system detects that it has been modified, calls to the +// file system return an ErrModified error. +func FS(a *Archive) (fs.FS, error) { + // Create a filesystem with a root directory. + root := &node{fileinfo: fileinfo{path: ".", mode: readOnlyDir}} + fsys := &filesystem{a, map[string]*node{root.path: root}} + + if err := initFiles(fsys); err != nil { + return nil, fmt.Errorf("cannot create fs.FS from txtar.Archive: %s", err) + } + return fsys, nil +} + +const ( + readOnly fs.FileMode = 0o444 // read only mode + readOnlyDir = readOnly | fs.ModeDir +) + +// ErrModified indicates that file system returned by FS +// noticed that the underlying archive has been modified +// since the call to FS. Detection of modification is best effort, +// to help diagnose misuse of the API, and is not guaranteed. +var ErrModified error = errors.New("txtar.Archive has been modified during txtar.FS") + +// A filesystem is a simple in-memory file system for txtar archives, +// represented as a map from valid path names to information about the +// files or directories they represent. +// +// File system operations are read only. Modifications to the underlying +// *Archive may race. To help prevent this, the filesystem tries +// to detect modification during Open and return ErrModified if it +// is able to detect a modification. +type filesystem struct { + ar *Archive + nodes map[string]*node +} + +// node is a file or directory in the tree of a filesystem. +type node struct { + fileinfo // fs.FileInfo and fs.DirEntry implementation + idx int // index into ar.Files (for files) + entries []fs.DirEntry // subdirectories and files (for directories) +} + +var _ fs.FS = (*filesystem)(nil) +var _ fs.DirEntry = (*node)(nil) + +// initFiles initializes fsys from fsys.ar.Files. Returns an error if there are any +// invalid file names or collisions between file or directories. +func initFiles(fsys *filesystem) error { + for idx, file := range fsys.ar.Files { + name := file.Name + if !fs.ValidPath(name) { + return fmt.Errorf("file %q is an invalid path", name) + } + + n := &node{idx: idx, fileinfo: fileinfo{path: name, size: len(file.Data), mode: readOnly}} + if err := insert(fsys, n); err != nil { + return err + } + } + return nil +} + +// insert adds node n as an entry to its parent directory within the filesystem. +func insert(fsys *filesystem, n *node) error { + if m := fsys.nodes[n.path]; m != nil { + return fmt.Errorf("duplicate path %q", n.path) + } + fsys.nodes[n.path] = n + + // fsys.nodes contains "." to prevent infinite loops. 
+ parent, err := directory(fsys, path.Dir(n.path)) + if err != nil { + return err + } + parent.entries = append(parent.entries, n) + return nil +} + +// directory returns the directory node with the path dir and lazily-creates it +// if it does not exist. +func directory(fsys *filesystem, dir string) (*node, error) { + if m := fsys.nodes[dir]; m != nil && m.IsDir() { + return m, nil // pre-existing directory + } + + n := &node{fileinfo: fileinfo{path: dir, mode: readOnlyDir}} + if err := insert(fsys, n); err != nil { + return nil, err + } + return n, nil +} + +// dataOf returns the data associated with the file t. +// May return ErrModified if fsys.ar has been modified. +func dataOf(fsys *filesystem, n *node) ([]byte, error) { + if n.idx >= len(fsys.ar.Files) { + return nil, ErrModified + } + + f := fsys.ar.Files[n.idx] + if f.Name != n.path || len(f.Data) != n.size { + return nil, ErrModified + } + return f.Data, nil +} + +func (fsys *filesystem) Open(name string) (fs.File, error) { + if !fs.ValidPath(name) { + return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid} + } + + n := fsys.nodes[name] + switch { + case n == nil: + return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist} + case n.IsDir(): + return &openDir{fileinfo: n.fileinfo, entries: n.entries}, nil + default: + data, err := dataOf(fsys, n) + if err != nil { + return nil, err + } + return &openFile{fileinfo: n.fileinfo, data: data}, nil + } +} + +func (fsys *filesystem) ReadFile(name string) ([]byte, error) { + file, err := fsys.Open(name) + if err != nil { + return nil, err + } + if file, ok := file.(*openFile); ok { + return slices.Clone(file.data), nil + } + return nil, &fs.PathError{Op: "read", Path: name, Err: fs.ErrInvalid} +} + +// A fileinfo implements fs.FileInfo and fs.DirEntry for a given archive file. +type fileinfo struct { + path string // unique path to the file or directory within a filesystem + size int + mode fs.FileMode +} + +var _ fs.FileInfo = (*fileinfo)(nil) +var _ fs.DirEntry = (*fileinfo)(nil) + +func (i *fileinfo) Name() string { return path.Base(i.path) } +func (i *fileinfo) Size() int64 { return int64(i.size) } +func (i *fileinfo) Mode() fs.FileMode { return i.mode } +func (i *fileinfo) Type() fs.FileMode { return i.mode.Type() } +func (i *fileinfo) ModTime() time.Time { return time.Time{} } +func (i *fileinfo) IsDir() bool { return i.mode&fs.ModeDir != 0 } +func (i *fileinfo) Sys() any { return nil } +func (i *fileinfo) Info() (fs.FileInfo, error) { return i, nil } + +// An openFile is a regular (non-directory) fs.File open for reading. 
+type openFile struct { + fileinfo + data []byte + offset int64 +} + +var _ fs.File = (*openFile)(nil) + +func (f *openFile) Stat() (fs.FileInfo, error) { return &f.fileinfo, nil } +func (f *openFile) Close() error { return nil } +func (f *openFile) Read(b []byte) (int, error) { + if f.offset >= int64(len(f.data)) { + return 0, io.EOF + } + if f.offset < 0 { + return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid} + } + n := copy(b, f.data[f.offset:]) + f.offset += int64(n) + return n, nil +} + +func (f *openFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case 0: + // offset += 0 + case 1: + offset += f.offset + case 2: + offset += int64(len(f.data)) + } + if offset < 0 || offset > int64(len(f.data)) { + return 0, &fs.PathError{Op: "seek", Path: f.path, Err: fs.ErrInvalid} + } + f.offset = offset + return offset, nil +} + +func (f *openFile) ReadAt(b []byte, offset int64) (int, error) { + if offset < 0 || offset > int64(len(f.data)) { + return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid} + } + n := copy(b, f.data[offset:]) + if n < len(b) { + return n, io.EOF + } + return n, nil +} + +// A openDir is a directory fs.File (so also an fs.ReadDirFile) open for reading. +type openDir struct { + fileinfo + entries []fs.DirEntry + offset int +} + +var _ fs.ReadDirFile = (*openDir)(nil) + +func (d *openDir) Stat() (fs.FileInfo, error) { return &d.fileinfo, nil } +func (d *openDir) Close() error { return nil } +func (d *openDir) Read(b []byte) (int, error) { + return 0, &fs.PathError{Op: "read", Path: d.path, Err: fs.ErrInvalid} +} + +func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) { + n := len(d.entries) - d.offset + if n == 0 && count > 0 { + return nil, io.EOF + } + if count > 0 && n > count { + n = count + } + list := make([]fs.DirEntry, n) + copy(list, d.entries[d.offset:d.offset+n]) + d.offset += n + return list, nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index fdd6fa86cc..924ba4f365 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -616,7 +616,7 @@ func (t *transportReader) ReadHeader(header []byte) (int, error) { t.er = err return 0, err } - t.windowHandler(len(header)) + t.windowHandler(n) return n, nil } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 187fbf1195..a96b6a6bff 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.67.0" +const Version = "1.67.1" diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index bb2966e3b4..8f9e592f87 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -351,7 +351,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. 
panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) } - return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString()) } func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 29846df222..0e72d85378 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -216,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto } v := m.Get(fd) - isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() - isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil - if isProto2Scalar || isSingularMessage { + if fd.HasPresence() { if m.skipNull { continue } diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go index 8401be8c84..024ffebd3d 100644 --- a/vendor/google.golang.org/protobuf/internal/descopts/options.go +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -9,7 +9,7 @@ // dependency on the descriptor proto package). package descopts -import pref "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // These variables are set by the init function in descriptor.pb.go via logic // in internal/filetype. In other words, so long as the descriptor proto package @@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect" // // Each variable is populated with a nil pointer to the options struct. var ( - File pref.ProtoMessage - Enum pref.ProtoMessage - EnumValue pref.ProtoMessage - Message pref.ProtoMessage - Field pref.ProtoMessage - Oneof pref.ProtoMessage - ExtensionRange pref.ProtoMessage - Service pref.ProtoMessage - Method pref.ProtoMessage + File protoreflect.ProtoMessage + Enum protoreflect.ProtoMessage + EnumValue protoreflect.ProtoMessage + Message protoreflect.ProtoMessage + Field protoreflect.ProtoMessage + Oneof protoreflect.ProtoMessage + ExtensionRange protoreflect.ProtoMessage + Service protoreflect.ProtoMessage + Method protoreflect.ProtoMessage ) diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go index 029a6a12d7..08dad7692c 100644 --- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -5,7 +5,7 @@ // Package editionssupport defines constants for editions that are supported. 
package editionssupport -import descriptorpb "google.golang.org/protobuf/types/descriptorpb" +import "google.golang.org/protobuf/types/descriptorpb" const ( Minimum = descriptorpb.Edition_EDITION_PROTO2 diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index df53ff40b2..fa790e0ff1 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -258,6 +258,7 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions + IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -351,6 +352,7 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } func (fd *Field) MapKey() protoreflect.FieldDescriptor { @@ -425,6 +427,7 @@ type ( Extendee protoreflect.MessageDescriptor Cardinality protoreflect.Cardinality Kind protoreflect.Kind + IsLazy bool EditionFeatures EditionFeatures } ExtensionL2 struct { @@ -465,6 +468,7 @@ func (xd *Extension) IsPacked() bool { } func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } func (xd *Extension) IsMap() bool { return false } func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 8a57d60b08..d2f549497e 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + xd.L1.IsLazy = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e56c91a8db..67a51b327c 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 11f5f356b6..fd4d0c83d2 100644 --- 
a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -68,7 +68,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + case genid.FeatureSet_Go_ext_number: parent = unmarshalGoFeature(v, parent) } } diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go index 45ccd01211..d9b9d916a2 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/doc.go +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -6,6 +6,6 @@ // and the well-known types. package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 9a652a2b42..7f67cbb6e9 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -12,20 +12,25 @@ import ( const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" -// Names for google.protobuf.GoFeatures. +// Names for pb.GoFeatures. const ( GoFeatures_message_name protoreflect.Name = "GoFeatures" - GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures" ) -// Field names for google.protobuf.GoFeatures. +// Field names for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" - GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" ) -// Field numbers for google.protobuf.GoFeatures. +// Field numbers for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 ) + +// Extension numbers +const ( + FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go index 8f9ea02ff2..bef5a25fbb 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field names and numbers for synthetic map entry messages. const ( diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go index 429384b85b..9404270de0 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field name and number for messages in wrappers.proto. 
const ( diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 4bb0a7a20c..0d5b546e0e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -67,7 +67,6 @@ type lazyExtensionValue struct { xi *extensionFieldInfo value protoreflect.Value b []byte - fn func() protoreflect.Value } type ExtensionField struct { @@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() { } f.lazy.value = val } else { - f.lazy.value = f.lazy.fn() + panic("No support for lazy fns for ExtensionField") } f.lazy.xi = nil - f.lazy.fn = nil f.lazy.b = nil atomic.StoreUint32(&f.lazy.atomicOnce, 1) } @@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) f.lazy = nil } -// SetLazy sets the type and a value that is to be lazily evaluated upon first use. -// This must not be called concurrently. -func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { - f.typ = t - f.lazy = &lazyExtensionValue{fn: fn} -} - // Value returns the value of the extension field. // This may be called concurrently. func (f *ExtensionField) Value() protoreflect.Value { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 78ee47e44b..7c1f66c8c1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si if err != nil { return out, err } + if cf.funcs.isInit == nil { + out.initialized = true + } vi.Set(vw) return out, nil } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 6b2fdbb739..78be9df342 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -189,6 +189,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { if mi.methods.Merge == nil { mi.methods.Merge = mi.merge } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } } // getUnknownBytes returns a *[]byte for the unknown fields. diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go deleted file mode 100644 index 145c577bd6..0000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/encoding/protowire" -) - -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { - v := p.v.Elem().Int() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := p.v.Elem().Int() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - p.v.Elem().SetInt(int64(v)) - out.n = n - return out, nil -} - -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(src.v.Elem()) -} - -var coderEnum = pointerCoderFuncs{ - size: sizeEnum, - marshal: appendEnum, - unmarshal: consumeEnum, - merge: mergeEnum, -} - -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - if p.v.Elem().Int() == 0 { - return 0 - } - return sizeEnum(p, f, opts) -} - -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if p.v.Elem().Int() == 0 { - return b, nil - } - return appendEnum(b, p, f, opts) -} - -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if src.v.Elem().Int() != 0 { - dst.v.Elem().Set(src.v.Elem()) - } -} - -var coderEnumNoZero = pointerCoderFuncs{ - size: sizeEnumNoZero, - marshal: appendEnumNoZero, - unmarshal: consumeEnum, - merge: mergeEnumNoZero, -} - -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return sizeEnum(pointer{p.v.Elem()}, f, opts) -} - -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendEnum(b, pointer{p.v.Elem()}, f, opts) -} - -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - if p.v.Elem().IsNil() { - p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) - } - return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) -} - -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if !src.v.Elem().IsNil() { - v := reflect.New(dst.v.Type().Elem().Elem()) - v.Elem().Set(src.v.Elem().Elem()) - dst.v.Elem().Set(v) - } -} - -var coderEnumPtr = pointerCoderFuncs{ - size: sizeEnumPtr, - marshal: appendEnumPtr, - unmarshal: consumeEnumPtr, - merge: mergeEnumPtr, -} - -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize - } - return size -} - -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - s := p.v.Elem() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n 
< 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - b = b[n:] - } - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - out.n = n - return out, nil -} - -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) -} - -var coderEnumSlice = pointerCoderFuncs{ - size: sizeEnumSlice, - marshal: appendEnumSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} - -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return 0 - } - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - return f.tagsize + protowire.SizeBytes(n) -} - -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -var coderEnumPackedSlice = pointerCoderFuncs{ - size: sizeEnumPackedSlice, - marshal: appendEnumPackedSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index 757642e23c..077712c2c5 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl // When using unsafe pointers, we can just treat enum values as int32s. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index e06ece55a2..f72ddd882f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { return protoreflect.ValueOfString(v.Convert(stringType).String()) } func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { - // pref.Value.String never panics, so we go through an interface + // protoreflect.Value.String never panics, so we go through an interface // conversion here to check the type. 
s := v.Interface().(string) if c.goType.Kind() == reflect.Slice && s == "" { diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index febd212247..6254f5de41 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,7 +10,7 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" - proto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go new file mode 100644 index 0000000000..9f6c32a7d8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go @@ -0,0 +1,224 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func equal(in protoiface.EqualInput) protoiface.EqualOutput { + return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)} +} + +// equalMessage is a fast-path variant of protoreflect.equalMessage. +// It takes advantage of the internal messageState type to avoid +// unnecessary allocations, type assertions. +func equalMessage(mx, my protoreflect.Message) bool { + if mx == nil || my == nil { + return mx == my + } + if mx.Descriptor() != my.Descriptor() { + return false + } + + msx, ok := mx.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + msy, ok := my.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + + mi := msx.messageInfo() + miy := msy.messageInfo() + if mi != miy { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + mi.init() + // Compares regular fields + // Modified Message.Range code that compares two messages of the same type + // while going over the fields. + for _, ri := range mi.rangeInfos { + var fd protoreflect.FieldDescriptor + var vx, vy protoreflect.Value + + switch ri := ri.(type) { + case *fieldInfo: + hx := ri.has(msx.pointer()) + hy := ri.has(msy.pointer()) + if hx != hy { + return false + } + if !hx { + continue + } + fd = ri.fieldDesc + vx = ri.get(msx.pointer()) + vy = ri.get(msy.pointer()) + case *oneofInfo: + fnx := ri.which(msx.pointer()) + fny := ri.which(msy.pointer()) + if fnx != fny { + return false + } + if fnx <= 0 { + continue + } + fi := mi.fields[fnx] + fd = fi.fieldDesc + vx = fi.get(msx.pointer()) + vy = fi.get(msy.pointer()) + } + + if !equalValue(fd, vx, vy) { + return false + } + } + + // Compare extensions. + // This is more complicated because mx or my could have empty/nil extension maps, + // however some populated extension map values are equal to nil extension maps. 
+ emx := mi.extensionMap(msx.pointer()) + emy := mi.extensionMap(msy.pointer()) + if emx != nil { + for k, x := range *emx { + xd := x.Type().TypeDescriptor() + xv := x.Value() + var y ExtensionField + ok := false + if emy != nil { + y, ok = (*emy)[k] + } + // We need to treat empty lists as equal to nil values + if emy == nil || !ok { + if xd.IsList() && xv.List().Len() == 0 { + continue + } + return false + } + + if !equalValue(xd, xv, y.Value()) { + return false + } + } + } + if emy != nil { + // emy may have extensions emx does not have, need to check them as well + for k, y := range *emy { + if emx != nil { + // emx has the field, so we already checked it + if _, ok := (*emx)[k]; ok { + continue + } + } + // Empty lists are equal to nil + if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 { + continue + } + + // Cant be equal if the extension is populated + return false + } + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool { + // slow path + if fd.Kind() != protoreflect.MessageKind { + return vx.Equal(vy) + } + + // fast path special cases + if fd.IsMap() { + if fd.MapValue().Kind() == protoreflect.MessageKind { + return equalMessageMap(vx.Map(), vy.Map()) + } + return vx.Equal(vy) + } + + if fd.IsList() { + return equalMessageList(vx.List(), vy.List()) + } + + return equalMessage(vx.Message(), vy.Message()) +} + +// Mostly copied from protoreflect.equalMap. +// This variant only works for messages as map types. +// All other map types should be handled via Value.Equal. +func equalMessageMap(mx, my protoreflect.Map) bool { + if mx.Len() != my.Len() { + return false + } + equal := true + mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { + if !my.Has(k) { + equal = false + return false + } + vy := my.Get(k) + equal = equalMessage(vx.Message(), vy.Message()) + return equal + }) + return equal +} + +// Mostly copied from protoreflect.equalList. +// The only change is the usage of equalImpl instead of protoreflect.equalValue. +func equalMessageList(lx, ly protoreflect.List) bool { + if lx.Len() != ly.Len() { + return false + } + for i := 0; i < lx.Len(); i++ { + // We only operate on messages here since equalImpl will not call us in any other case. + if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) { + return false + } + } + return true +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +// Copied from protoreflect.equalUnknown. +func equalUnknown(x, y protoreflect.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) 
+ y = y[n:] + } + if len(mx) != len(my) { + return false + } + + for k, v1 := range mx { + if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) { + return false + } + } + + return true +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 6e8677ee63..b6849d6692 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsLazy() bool { return false } func (x placeholderExtension) IsPacked() bool { return false } func (x placeholderExtension) IsList() bool { return false } func (x placeholderExtension) IsMap() bool { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 019399d454..741b5ed29c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -30,8 +30,8 @@ type MessageInfo struct { // Desc is the underlying message descriptor type and must be populated. Desc protoreflect.MessageDescriptor - // Exporter must be provided in a purego environment in order to provide - // access to unexported fields. + // Deprecated: Exporter will be removed the next time we bump + // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640 Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go deleted file mode 100644 index da685e8a29..0000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "fmt" - "reflect" - "sync" -) - -const UnsafeEnabled = false - -// Pointer is an opaque pointer type. -type Pointer any - -// offset represents the offset to a struct field, accessible from a pointer. -// The offset is the field index into a struct. -type offset struct { - index int - export exporter -} - -// offsetOf returns a field offset for the struct field. -func offsetOf(f reflect.StructField, x exporter) offset { - if len(f.Index) != 1 { - panic("embedded structs are not supported") - } - if f.PkgPath == "" { - return offset{index: f.Index[0]} // field is already exported - } - if x == nil { - panic("exporter must be provided for unexported field") - } - return offset{index: f.Index[0], export: x} -} - -// IsValid reports whether the offset is valid. -func (f offset) IsValid() bool { return f.index >= 0 } - -// invalidOffset is an invalid field offset. -var invalidOffset = offset{index: -1} - -// zeroOffset is a noop when calling pointer.Apply. -var zeroOffset = offset{index: 0} - -// pointer is an abstract representation of a pointer to a struct or field. 
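The new internal/impl/equal.go above, together with the Methods.Equal default wired up in codec_message.go, gives generated messages a fast-path equality check. A minimal usage sketch (illustrative, not part of the vendored code) of the public entry point that now dispatches to it:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	x := durationpb.New(90 * time.Second)
	y := durationpb.New(90 * time.Second)
	// Both operands are generated messages, so proto.Equal can take the
	// protoiface Methods.Equal fast path instead of the reflection-based
	// comparison; the result is the same either way.
	fmt.Println(proto.Equal(x, y)) // true
}
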
-type pointer struct{ v reflect.Value } - -// pointerOf returns p as a pointer. -func pointerOf(p Pointer) pointer { - return pointerOfIface(p) -} - -// pointerOfValue returns v as a pointer. -func pointerOfValue(v reflect.Value) pointer { - return pointer{v: v} -} - -// pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v any) pointer { - return pointer{v: reflect.ValueOf(v)} -} - -// IsNil reports whether the pointer is nil. -func (p pointer) IsNil() bool { - return p.v.IsNil() -} - -// Apply adds an offset to the pointer to derive a new pointer -// to a specified field. The current pointer must be pointing at a struct. -func (p pointer) Apply(f offset) pointer { - if f.export != nil { - if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { - return pointer{v: v} - } - } - return pointer{v: p.v.Elem().Field(f.index).Addr()} -} - -// AsValueOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) -func (p pointer) AsValueOf(t reflect.Type) reflect.Value { - if got := p.v.Type().Elem(); got != t { - panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) - } - return p.v -} - -// AsIfaceOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) any { - return p.AsValueOf(t).Interface() -} - -func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } -func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } -func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } -func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } -func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } -func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } -func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } -func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } -func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } -func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } -func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } -func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } -func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } -func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } -func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } -func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } -func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } -func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } -func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } -func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } -func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } -func (p pointer) String() *string { return p.v.Interface().(*string) } -func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } -func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } -func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } -func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } -func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } -func (p pointer) WeakFields() *weakFields { return 
(*weakFields)(p.v.Interface().(*WeakFields)) } -func (p pointer) Extensions() *map[int32]ExtensionField { - return p.v.Interface().(*map[int32]ExtensionField) -} - -func (p pointer) Elem() pointer { - return pointer{v: p.v.Elem()} -} - -// PointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) PointerSlice() []pointer { - // TODO: reconsider this - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// AppendPointerSlice appends v to p, which must be a []*T. -func (p pointer) AppendPointerSlice(v pointer) { - sp := p.v.Elem() - sp.Set(reflect.Append(sp, v.v)) -} - -// SetPointer sets *p to v. -func (p pointer) SetPointer(v pointer) { - p.v.Elem().Set(v.v) -} - -func growSlice(p pointer, addCap int) { - // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. - in := p.v.Elem() - out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) - reflect.Copy(out, in) - p.v.Elem().Set(out) -} - -func (p pointer) growBoolSlice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat32Slice(addCap int) { - growSlice(p, addCap) -} - -func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } -func (ms *messageState) pointer() pointer { panic("not supported") } -func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } - -type atomicNilMessage struct { - once sync.Once - m messageReflectWrapper -} - -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { - m.once.Do(func() { - m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) - m.m.mi = mi - }) - return &m.m -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 5f20ca5d8a..79e186667b 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl import ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go deleted file mode 100644 index a1f6f33386..0000000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package strs - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} - -type Builder struct{} - -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - return prefix.Append(name) -} - -func (*Builder) MakeString(b []byte) string { - return string(b) -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index a008acd090..832a7988f1 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go index 60166f2ba3..1ffddf6877 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index dbbf1f6862..fb8e15e8da 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 34 - Patch = 2 + Minor = 35 + Patch = 1 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 1a0be1b03c..c36d4a9cd7 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -8,6 +8,7 @@ import ( "reflect" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Equal reports whether two messages are equal, @@ -51,6 +52,14 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } + + // Only one of the messages needs to implement the fast-path for it to work. + pmx := protoMethods(mx) + pmy := protoMethods(my) + if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil { + return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal + } + vx := protoreflect.ValueOfMessage(mx) vy := protoreflect.ValueOfMessage(my) return vx.Equal(vy) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index d248f29284..78445d116f 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. 
// It panics if xt does not extend m. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// GetExtension, then the call should be followed immediately by a +// type assertion to the expected output value. For example: +// +// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage) +// +// This pattern enables static analysis tools to verify that the asserted type +// matches the Go type associated with the extension field and +// also enables a possible future migration to a type-safe extension API. +// +// Since singular messages are the most common extension type, the pattern of +// calling HasExtension followed by GetExtension may be simplified to: +// +// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil { +// ... // make use of mm +// } +// +// The mm variable is non-nil if and only if HasExtension reports true. func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { @@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// SetExtension (e.g., foopb.E_MyExtension), then the value should be a +// concrete type that matches the expected Go type for the extension descriptor +// so that static analysis tools can verify type correctness. +// This also enables a possible future migration to a type-safe extension API. 
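Following the expanded doc comments, a short usage sketch (illustrative only) of the type-assertion pattern they describe, assuming the generated extension descriptor gofeaturespb.E_Go for the pb.GoFeatures extension of google.protobuf.FeatureSet that appears elsewhere in this diff:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/gofeaturespb"
)

func main() {
	fs := &descriptorpb.FeatureSet{}
	proto.SetExtension(fs, gofeaturespb.E_Go, &gofeaturespb.GoFeatures{
		LegacyUnmarshalJsonEnum: proto.Bool(true),
	})
	// The immediate type assertion lets static analysis confirm that the Go
	// type matches the extension descriptor, as the doc comment recommends.
	gf := proto.GetExtension(fs, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures)
	fmt.Println(gf.GetLegacyUnmarshalJsonEnum()) // true
}
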
func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 8561755427..ebcb4a8ab1 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -150,6 +150,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() + f.L1.IsLazy = opts.GetLazy() if opts.Packed != nil { f.L1.EditionFeatures.IsPacked = opts.GetPacked() } @@ -214,6 +215,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if xd.JsonName != nil { x.L2.StringName.InitJSON(xd.GetJsonName()) } + if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded { + x.L1.Kind = protoreflect.GroupKind + } } return xs, nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 804830eda3..002e0047ae 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -14,7 +14,7 @@ import ( "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" - gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" + "google.golang.org/protobuf/types/gofeaturespb" ) var defaults = &descriptorpb.FeatureSetDefaults{} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index d5d5af6ebe..742cb518c4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -23,6 +23,7 @@ type ( Unmarshal func(unmarshalInput) (unmarshalOutput, error) Merge func(mergeInput) mergeOutput CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + Equal func(equalInput) equalOutput } supportFlags = uint64 sizeInput = struct { @@ -75,4 +76,13 @@ type ( checkInitializedOutput = struct { pragma.NoUnkeyedLiterals } + equalInput = struct { + pragma.NoUnkeyedLiterals + MessageA Message + MessageB Message + } + equalOutput = struct { + pragma.NoUnkeyedLiterals + Equal bool + } ) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go deleted file mode 100644 index 75f83a2af0..0000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package protoreflect - -import "google.golang.org/protobuf/internal/pragma" - -type valueType int - -const ( - nilType valueType = iota - boolType - int32Type - int64Type - uint32Type - uint64Type - float32Type - float64Type - stringType - bytesType - enumType - ifaceType -) - -// value is a union where only one type can be represented at a time. -// This uses a distinct field for each type. 
This is type safe in Go, but -// occupies more memory than necessary (72B). -type value struct { - pragma.DoNotCompare // 0B - - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface any // 16B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, str: v} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, bin: v} -} -func valueOfIface(v any) Value { - return Value{typ: ifaceType, iface: v} -} - -func (v Value) getString() string { - return v.str -} -func (v Value) getBytes() []byte { - return v.bin -} -func (v Value) getIface() any { - return v.iface -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 7f3583ead8..0015fcb35d 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index f7d386990a..479527b58d 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 44cf467d88..246156561c 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -39,6 +39,9 @@ type Methods = struct { // CheckInitialized returns an error if any required fields in the message are not set. CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) + + // Equal compares two messages and returns EqualOutput.Equal == true if they are equal. + Equal func(EqualInput) EqualOutput } // SupportFlags indicate support for optional features. @@ -166,3 +169,18 @@ type CheckInitializedInput = struct { type CheckInitializedOutput = struct { pragma.NoUnkeyedLiterals } + +// EqualInput is input to the Equal method. +type EqualInput = struct { + pragma.NoUnkeyedLiterals + + MessageA protoreflect.Message + MessageB protoreflect.Message +} + +// EqualOutput is output from the Equal method. 
+type EqualOutput = struct { + pragma.NoUnkeyedLiterals + + Equal bool +} diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 9403eb0750..6dea75cd5b 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -1217,11 +1217,9 @@ type FileDescriptorSet struct { func (x *FileDescriptorSet) Reset() { *x = FileDescriptorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorSet) String() string { @@ -1232,7 +1230,7 @@ func (*FileDescriptorSet) ProtoMessage() {} func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1291,11 +1289,9 @@ type FileDescriptorProto struct { func (x *FileDescriptorProto) Reset() { *x = FileDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorProto) String() string { @@ -1306,7 +1302,7 @@ func (*FileDescriptorProto) ProtoMessage() {} func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1434,11 +1430,9 @@ type DescriptorProto struct { func (x *DescriptorProto) Reset() { *x = DescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto) String() string { @@ -1449,7 +1443,7 @@ func (*DescriptorProto) ProtoMessage() {} func (x *DescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1561,11 +1555,9 @@ const ( func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions) String() string { @@ -1576,7 +1568,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {} func (x *ExtensionRangeOptions) ProtoReflect() 
protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1680,11 +1672,9 @@ type FieldDescriptorProto struct { func (x *FieldDescriptorProto) Reset() { *x = FieldDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldDescriptorProto) String() string { @@ -1695,7 +1685,7 @@ func (*FieldDescriptorProto) ProtoMessage() {} func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1799,11 +1789,9 @@ type OneofDescriptorProto struct { func (x *OneofDescriptorProto) Reset() { *x = OneofDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofDescriptorProto) String() string { @@ -1814,7 +1802,7 @@ func (*OneofDescriptorProto) ProtoMessage() {} func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1863,11 +1851,9 @@ type EnumDescriptorProto struct { func (x *EnumDescriptorProto) Reset() { *x = EnumDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto) String() string { @@ -1878,7 +1864,7 @@ func (*EnumDescriptorProto) ProtoMessage() {} func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1941,11 +1927,9 @@ type EnumValueDescriptorProto struct { func (x *EnumValueDescriptorProto) Reset() { *x = EnumValueDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueDescriptorProto) String() string { @@ -1956,7 +1940,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {} func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2005,11 +1989,9 @@ type ServiceDescriptorProto struct { func (x *ServiceDescriptorProto) Reset() { *x = ServiceDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceDescriptorProto) String() string { @@ -2020,7 +2002,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {} func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2082,11 +2064,9 @@ const ( func (x *MethodDescriptorProto) Reset() { *x = MethodDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodDescriptorProto) String() string { @@ -2097,7 +2077,7 @@ func (*MethodDescriptorProto) ProtoMessage() {} func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2267,11 +2247,9 @@ const ( func (x *FileOptions) Reset() { *x = FileOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileOptions) String() string { @@ -2282,7 +2260,7 @@ func (*FileOptions) ProtoMessage() {} func (x *FileOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2534,11 +2512,9 @@ const ( func (x *MessageOptions) Reset() { *x = MessageOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MessageOptions) String() string { @@ -2549,7 +2525,7 @@ func (*MessageOptions) ProtoMessage() {} func (x *MessageOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2707,11 +2683,9 @@ const ( func (x *FieldOptions) Reset() { *x = 
FieldOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions) String() string { @@ -2722,7 +2696,7 @@ func (*FieldOptions) ProtoMessage() {} func (x *FieldOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2849,11 +2823,9 @@ type OneofOptions struct { func (x *OneofOptions) Reset() { *x = OneofOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofOptions) String() string { @@ -2864,7 +2836,7 @@ func (*OneofOptions) ProtoMessage() {} func (x *OneofOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2929,11 +2901,9 @@ const ( func (x *EnumOptions) Reset() { *x = EnumOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumOptions) String() string { @@ -2944,7 +2914,7 @@ func (*EnumOptions) ProtoMessage() {} func (x *EnumOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3026,11 +2996,9 @@ const ( func (x *EnumValueOptions) Reset() { *x = EnumValueOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueOptions) String() string { @@ -3041,7 +3009,7 @@ func (*EnumValueOptions) ProtoMessage() {} func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3115,11 +3083,9 @@ const ( func (x *ServiceOptions) Reset() { *x = ServiceOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *ServiceOptions) String() string { @@ -3130,7 +3096,7 @@ func (*ServiceOptions) ProtoMessage() {} func (x *ServiceOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3192,11 +3158,9 @@ const ( func (x *MethodOptions) Reset() { *x = MethodOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodOptions) String() string { @@ -3207,7 +3171,7 @@ func (*MethodOptions) ProtoMessage() {} func (x *MethodOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3274,11 +3238,9 @@ type UninterpretedOption struct { func (x *UninterpretedOption) Reset() { *x = UninterpretedOption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption) String() string { @@ -3289,7 +3251,7 @@ func (*UninterpretedOption) ProtoMessage() {} func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3375,11 +3337,9 @@ type FeatureSet struct { func (x *FeatureSet) Reset() { *x = FeatureSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSet) String() string { @@ -3390,7 +3350,7 @@ func (*FeatureSet) ProtoMessage() {} func (x *FeatureSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3467,11 +3427,9 @@ type FeatureSetDefaults struct { func (x *FeatureSetDefaults) Reset() { *x = FeatureSetDefaults{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults) String() string { @@ -3482,7 +3440,7 @@ func (*FeatureSetDefaults) ProtoMessage() {} func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_descriptor_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3578,11 +3536,9 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo) String() string { @@ -3593,7 +3549,7 @@ func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3630,11 +3586,9 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo) String() string { @@ -3645,7 +3599,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3679,11 +3633,9 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ExtensionRange) String() string { @@ -3694,7 +3646,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3744,11 +3696,9 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ReservedRange) String() string { @@ -3759,7 +3709,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_descriptor_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3813,11 +3763,9 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions_Declaration) String() string { @@ -3828,7 +3776,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3895,11 +3843,9 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto_EnumReservedRange) String() string { @@ -3910,7 +3856,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3950,11 +3896,9 @@ type FieldOptions_EditionDefault struct { func (x *FieldOptions_EditionDefault) Reset() { *x = FieldOptions_EditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_EditionDefault) String() string { @@ -3965,7 +3909,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {} func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4018,11 +3962,9 @@ type FieldOptions_FeatureSupport struct { func (x *FieldOptions_FeatureSupport) Reset() { *x = FieldOptions_FeatureSupport{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_FeatureSupport) 
String() string { @@ -4033,7 +3975,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {} func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4092,11 +4034,9 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption_NamePart) String() string { @@ -4107,7 +4047,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4154,11 +4094,9 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { @@ -4169,7 +4107,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4305,11 +4243,9 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo_Location) String() string { @@ -4320,7 +4256,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4392,11 +4328,9 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_google_protobuf_descriptor_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo_Annotation) String() string { @@ -4407,7 +4341,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5385,424 +5319,6 @@ func file_google_protobuf_descriptor_proto_init() { if File_google_protobuf_descriptor_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*FieldDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*OneofDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*ServiceDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*MethodDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*FileOptions); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*MessageOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*OneofOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*EnumOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*ServiceOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*MethodOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ExtensionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions_Declaration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_EditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_FeatureSupport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption_NamePart); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo_Location); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo_Annotation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index a2ca940c50..c7e860fcd6 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -29,11 +29,9 @@ type GoFeatures struct { func (x *GoFeatures) Reset() { *x = GoFeatures{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_go_features_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *GoFeatures) String() string { @@ -44,7 +42,7 @@ func (*GoFeatures) ProtoMessage() {} func (x *GoFeatures) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_go_features_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -145,20 +143,6 @@ func file_google_protobuf_go_features_proto_init() { if File_google_protobuf_go_features_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GoFeatures); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 7172b43d38..87da199a38 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -368,11 +368,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) { func (x *Any) Reset() { *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_any_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Any) String() string { @@ -383,7 +381,7 @@ func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_any_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,20 +459,6 @@ func file_google_protobuf_any_proto_init() { if File_google_protobuf_any_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 1b71bcd910..b99d4d2410 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -245,11 +245,9 @@ func (x *Duration) check() uint { func (x *Duration) Reset() { *x = Duration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_duration_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Duration) String() string { @@ -260,7 +258,7 @@ func (*Duration) ProtoMessage() {} func (x *Duration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_duration_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -339,20 +337,6 @@ func file_google_protobuf_duration_proto_init() { if File_google_protobuf_duration_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Duration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index d87b4fb828..1761bc9c69 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -55,11 +55,9 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_empty_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Empty) String() string { @@ -70,7 +68,7 @@ func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_empty_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -131,20 +129,6 @@ func file_google_protobuf_empty_proto_init() { if File_google_protobuf_empty_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index ac1e91bb6d..19de8d371f 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -467,11 +467,9 @@ func rangeFields(path string, f func(field string) bool) bool { func (x *FieldMask) Reset() { *x = FieldMask{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_field_mask_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldMask) String() string { @@ -482,7 +480,7 @@ func (*FieldMask) ProtoMessage() {} func (x *FieldMask) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -553,20 +551,6 @@ func file_google_protobuf_field_mask_proto_init() { if File_google_protobuf_field_mask_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := 
v.(*FieldMask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index d45361cbc7..8f206a6611 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -120,6 +120,7 @@ package structpb import ( base64 "encoding/base64" + json "encoding/json" protojson "google.golang.org/protobuf/encoding/protojson" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -233,11 +234,9 @@ func (x *Struct) UnmarshalJSON(b []byte) error { func (x *Struct) Reset() { *x = Struct{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Struct) String() string { @@ -248,7 +247,7 @@ func (*Struct) ProtoMessage() {} func (x *Struct) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -296,19 +295,20 @@ type Value struct { // NewValue constructs a Value from a general-purpose Go interface. // -// ╔════════════════════════╤════════════════════════════════════════════╗ -// ║ Go type │ Conversion ║ -// ╠════════════════════════╪════════════════════════════════════════════╣ -// ║ nil │ stored as NullValue ║ -// ║ bool │ stored as BoolValue ║ -// ║ int, int32, int64 │ stored as NumberValue ║ -// ║ uint, uint32, uint64 │ stored as NumberValue ║ -// ║ float32, float64 │ stored as NumberValue ║ -// ║ string │ stored as StringValue; must be valid UTF-8 ║ -// ║ []byte │ stored as StringValue; base64-encoded ║ -// ║ map[string]any │ stored as StructValue ║ -// ║ []any │ stored as ListValue ║ -// ╚════════════════════════╧════════════════════════════════════════════╝ +// ╔═══════════════════════════════════════╤════════════════════════════════════════════╗ +// ║ Go type │ Conversion ║ +// ╠═══════════════════════════════════════╪════════════════════════════════════════════╣ +// ║ nil │ stored as NullValue ║ +// ║ bool │ stored as BoolValue ║ +// ║ int, int8, int16, int32, int64 │ stored as NumberValue ║ +// ║ uint, uint8, uint16, uint32, uint64 │ stored as NumberValue ║ +// ║ float32, float64 │ stored as NumberValue ║ +// ║ json.Number │ stored as NumberValue ║ +// ║ string │ stored as StringValue; must be valid UTF-8 ║ +// ║ []byte │ stored as StringValue; base64-encoded ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ +// ╚═══════════════════════════════════════╧════════════════════════════════════════════╝ // // When converting an int64 or uint64 to a NumberValue, numeric precision loss // is possible since they are stored as a float64. 
@@ -320,12 +320,20 @@ func NewValue(v any) (*Value, error) { return NewBoolValue(v), nil case int: return NewNumberValue(float64(v)), nil + case int8: + return NewNumberValue(float64(v)), nil + case int16: + return NewNumberValue(float64(v)), nil case int32: return NewNumberValue(float64(v)), nil case int64: return NewNumberValue(float64(v)), nil case uint: return NewNumberValue(float64(v)), nil + case uint8: + return NewNumberValue(float64(v)), nil + case uint16: + return NewNumberValue(float64(v)), nil case uint32: return NewNumberValue(float64(v)), nil case uint64: @@ -334,6 +342,12 @@ func NewValue(v any) (*Value, error) { return NewNumberValue(float64(v)), nil case float64: return NewNumberValue(float64(v)), nil + case json.Number: + n, err := v.Float64() + if err != nil { + return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err) + } + return NewNumberValue(n), nil case string: if !utf8.ValidString(v) { return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v) @@ -441,11 +455,9 @@ func (x *Value) UnmarshalJSON(b []byte) error { func (x *Value) Reset() { *x = Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Value) String() string { @@ -456,7 +468,7 @@ func (*Value) ProtoMessage() {} func (x *Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -613,11 +625,9 @@ func (x *ListValue) UnmarshalJSON(b []byte) error { func (x *ListValue) Reset() { *x = ListValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListValue) String() string { @@ -628,7 +638,7 @@ func (*ListValue) ProtoMessage() {} func (x *ListValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -742,44 +752,6 @@ func file_google_protobuf_struct_proto_init() { if File_google_protobuf_struct_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Struct); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ListValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } 
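The NewValue hunk above extends the accepted input types with int8/int16, uint8/uint16 and json.Number. A minimal usage sketch, assuming the vendored structpb shown in this diff is the version in use (the JSON payload and field name are illustrative only):

    package main

    import (
        "encoding/json"
        "fmt"
        "strings"

        "google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
        // Decode with UseNumber so numeric values arrive as json.Number rather than float64.
        dec := json.NewDecoder(strings.NewReader(`{"replicas": 3}`))
        dec.UseNumber()
        var m map[string]any
        if err := dec.Decode(&m); err != nil {
            panic(err)
        }

        // With the json.Number case added in the hunk above, this no longer fails
        // with an "invalid type" error; the number is stored as a NumberValue.
        v, err := structpb.NewValue(m["replicas"])
        if err != nil {
            panic(err)
        }
        fmt.Println(v.GetNumberValue()) // 3
    }

As the updated conversion table notes, int64, uint64 and large json.Number values may lose precision, since NumberValue is backed by a float64.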
file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ (*Value_NullValue)(nil), (*Value_NumberValue)(nil), diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 83a5a645b0..0d20722d70 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -254,11 +254,9 @@ func (x *Timestamp) check() uint { func (x *Timestamp) Reset() { *x = Timestamp{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Timestamp) String() string { @@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {} func (x *Timestamp) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -348,20 +346,6 @@ func file_google_protobuf_timestamp_proto_init() { if File_google_protobuf_timestamp_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Timestamp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index e473f826aa..006060e569 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -69,11 +69,9 @@ func Double(v float64) *DoubleValue { func (x *DoubleValue) Reset() { *x = DoubleValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DoubleValue) String() string { @@ -84,7 +82,7 @@ func (*DoubleValue) ProtoMessage() {} func (x *DoubleValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -125,11 +123,9 @@ func Float(v float32) *FloatValue { func (x *FloatValue) Reset() { *x = FloatValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FloatValue) String() string { @@ -140,7 +136,7 @@ func (*FloatValue) ProtoMessage() {} func (x *FloatValue) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_wrappers_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -181,11 +177,9 @@ func Int64(v int64) *Int64Value { func (x *Int64Value) Reset() { *x = Int64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int64Value) String() string { @@ -196,7 +190,7 @@ func (*Int64Value) ProtoMessage() {} func (x *Int64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -237,11 +231,9 @@ func UInt64(v uint64) *UInt64Value { func (x *UInt64Value) Reset() { *x = UInt64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt64Value) String() string { @@ -252,7 +244,7 @@ func (*UInt64Value) ProtoMessage() {} func (x *UInt64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -293,11 +285,9 @@ func Int32(v int32) *Int32Value { func (x *Int32Value) Reset() { *x = Int32Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int32Value) String() string { @@ -308,7 +298,7 @@ func (*Int32Value) ProtoMessage() {} func (x *Int32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -349,11 +339,9 @@ func UInt32(v uint32) *UInt32Value { func (x *UInt32Value) Reset() { *x = UInt32Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt32Value) String() string { @@ -364,7 +352,7 @@ func (*UInt32Value) ProtoMessage() {} func (x *UInt32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -405,11 +393,9 @@ func Bool(v bool) *BoolValue { func (x *BoolValue) Reset() { *x = BoolValue{} - if 
protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BoolValue) String() string { @@ -420,7 +406,7 @@ func (*BoolValue) ProtoMessage() {} func (x *BoolValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,11 +447,9 @@ func String(v string) *StringValue { func (x *StringValue) Reset() { *x = StringValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StringValue) String() string { @@ -476,7 +460,7 @@ func (*StringValue) ProtoMessage() {} func (x *StringValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -517,11 +501,9 @@ func Bytes(v []byte) *BytesValue { func (x *BytesValue) Reset() { *x = BytesValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BytesValue) String() string { @@ -532,7 +514,7 @@ func (*BytesValue) ProtoMessage() {} func (x *BytesValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -629,116 +611,6 @@ func file_google_protobuf_wrappers_proto_init() { if File_google_protobuf_wrappers_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*DoubleValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FloatValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*Int64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*UInt64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { - 
switch v := v.(*Int32Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*UInt32Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*BoolValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*StringValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*BytesValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/helm.sh/helm/v3/internal/resolver/resolver.go b/vendor/helm.sh/helm/v3/internal/resolver/resolver.go index c5fc636433..b6f45da9e2 100644 --- a/vendor/helm.sh/helm/v3/internal/resolver/resolver.go +++ b/vendor/helm.sh/helm/v3/internal/resolver/resolver.go @@ -172,7 +172,7 @@ func (r *Resolver) Resolve(reqs []*chart.Dependency, repoNames map[string]string Repository: d.Repository, Version: version, } - // The version are already sorted and hence the first one to satisfy the constraint is used + // The versions are already sorted and hence the first one to satisfy the constraint is used for _, ver := range vs { v, err := semver.NewVersion(ver.Version) // OCI does not need URLs diff --git a/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go index 4e4eacc60d..d29bb5f871 100644 --- a/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go +++ b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go @@ -260,7 +260,7 @@ func fixLongPath(path string) string { // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248. // // The MSDN docs appear to say that a normal path that is 248 bytes long - // will work; empirically the path must be less then 248 bytes long. + // will work; empirically the path must be less than 248 bytes long. if len(path) < 248 { // Don't fix. (This is how Go 1.7 and earlier worked, // not automatically generating the \\?\ form) diff --git a/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go b/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go index dc832ed80e..7cd1dace96 100644 --- a/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go +++ b/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go @@ -65,7 +65,7 @@ func CertPoolFromFile(filename string) (*x509.CertPool, error) { return cp, nil } -// CertFromFilePair returns an tls.Certificate containing the +// CertFromFilePair returns a tls.Certificate containing the // certificates public/private key pair from a pair of given PEM-encoded files. 
// Returns an error if the file could not be read, a certificate could not // be parsed, or if the file does not contain any certificates diff --git a/vendor/helm.sh/helm/v3/pkg/action/install.go b/vendor/helm.sh/helm/v3/pkg/action/install.go index f0292a0a3f..7ca40c88aa 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/install.go +++ b/vendor/helm.sh/helm/v3/pkg/action/install.go @@ -55,7 +55,7 @@ import ( "helm.sh/helm/v3/pkg/storage/driver" ) -// NOTESFILE_SUFFIX that we want to treat special. It goes through the templating engine +// notesFileSuffix that we want to treat special. It goes through the templating engine // but it's not a yaml file (resource) hence can't have hooks, etc. And the user actually // wants to see this file after rendering in the status command. However, it must be a suffix // since there can be filepath in front of it. @@ -307,7 +307,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma } if driver.ContainsSystemLabels(i.Labels) { - return nil, fmt.Errorf("user suplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels()) + return nil, fmt.Errorf("user supplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels()) } rel := i.createRelease(chrt, vals, i.Labels) @@ -389,7 +389,7 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma } } - // If Replace is true, we need to supercede the last release. + // If Replace is true, we need to supersede the last release. if i.Replace { if err := i.replaceRelease(rel); err != nil { return nil, err @@ -631,7 +631,7 @@ func createOrOpenFile(filename string, append bool) (*os.File, error) { return os.Create(filename) } -// check if the directory exists to create file. creates if don't exists +// check if the directory exists to create file. creates if doesn't exist func ensureDirectoryForFile(file string) error { baseDir := path.Dir(file) _, err := os.Stat(baseDir) diff --git a/vendor/helm.sh/helm/v3/pkg/action/upgrade.go b/vendor/helm.sh/helm/v3/pkg/action/upgrade.go index 15bdae8da5..a08d68495f 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/upgrade.go +++ b/vendor/helm.sh/helm/v3/pkg/action/upgrade.go @@ -279,7 +279,7 @@ func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[strin } if driver.ContainsSystemLabels(u.Labels) { - return nil, nil, fmt.Errorf("user suplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels()) + return nil, nil, fmt.Errorf("user supplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels()) } // Store an upgraded release. diff --git a/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go b/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go index 196e5f81d6..8bb5493465 100644 --- a/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go +++ b/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go @@ -101,7 +101,7 @@ func ensureArchive(name string, raw *os.File) error { return nil } -// isGZipApplication checks whether the achieve is of the application/x-gzip type. +// isGZipApplication checks whether the archive is of the application/x-gzip type. 
func isGZipApplication(data []byte) bool { sig := []byte("\x1F\x8B\x08") return bytes.HasPrefix(data, sig) diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go b/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go index 205d99e093..36a3419272 100644 --- a/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go @@ -137,7 +137,7 @@ func processDependencyEnabled(c *chart.Chart, v map[string]interface{}, path str // If any dependency is not a part of Chart.yaml // then this should be added to chartDependencies. // However, if the dependency is already specified in Chart.yaml - // we should not add it, as it would be anyways processed from Chart.yaml + // we should not add it, as it would be processed from Chart.yaml anyway. Loop: for _, existing := range c.Dependencies() { diff --git a/vendor/helm.sh/helm/v3/pkg/cli/output/output.go b/vendor/helm.sh/helm/v3/pkg/cli/output/output.go index a46c977ad9..01649c812f 100644 --- a/vendor/helm.sh/helm/v3/pkg/cli/output/output.go +++ b/vendor/helm.sh/helm/v3/pkg/cli/output/output.go @@ -73,7 +73,7 @@ func (o Format) Write(out io.Writer, w Writer) error { } // ParseFormat takes a raw string and returns the matching Format. -// If the format does not exists, ErrInvalidFormatType is returned +// If the format does not exist, ErrInvalidFormatType is returned func ParseFormat(s string) (out Format, err error) { switch s { case Table.String(): diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/manager.go b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go index d5340575d1..ec4056d275 100644 --- a/vendor/helm.sh/helm/v3/pkg/downloader/manager.go +++ b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go @@ -173,7 +173,7 @@ func (m *Manager) Update() error { // has some information about them and, when possible, the index files // locally. // TODO(mattfarina): Repositories should be explicitly added by end users - // rather than automattic. In Helm v4 require users to add repositories. They + // rather than automatic. In Helm v4 require users to add repositories. They // should have to add them in order to make sure they are aware of the // repositories and opt-in to any locations, for security. repoNames, err = m.ensureMissingRepos(repoNames, req) diff --git a/vendor/helm.sh/helm/v3/pkg/engine/engine.go b/vendor/helm.sh/helm/v3/pkg/engine/engine.go index 058cfa7493..df3a600a39 100644 --- a/vendor/helm.sh/helm/v3/pkg/engine/engine.go +++ b/vendor/helm.sh/helm/v3/pkg/engine/engine.go @@ -169,7 +169,7 @@ func tplFun(parent *template.Template, includedNames map[string]int, strict bool }) // We need a .New template, as template text which is just blanks - // or comments after parsing out defines just addes new named + // or comments after parsing out defines just adds new named // template definitions without changing the main template. // https://pkg.go.dev/text/template#Template.Parse // Use the parent's name for lack of a better way to identify the tpl diff --git a/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go index 86a7d698ca..75e85098d1 100644 --- a/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go +++ b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go @@ -131,7 +131,7 @@ func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (met return res, err } for _, resource := range resList.APIResources { - // if a resource contains a "/" it's referencing a subresource. we don't support suberesource for now. 
+ // if a resource contains a "/" it's referencing a subresource. we don't support subresource for now. if resource.Kind == gvk.Kind && !strings.Contains(resource.Name, "/") { res = resource res.Group = gvk.Group diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go index 22d7bf0a1b..6b4f1fc770 100644 --- a/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go +++ b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go @@ -34,7 +34,7 @@ const ( DataHomeEnvVar = "HELM_DATA_HOME" ) -// lazypath is an lazy-loaded path buffer for the XDG base directory specification. +// lazypath is a lazy-loaded path buffer for the XDG base directory specification. type lazypath string func (l lazypath) path(helmEnvVar, xdgEnvVar string, defaultFn func() string, elem ...string) string { diff --git a/vendor/helm.sh/helm/v3/pkg/ignore/doc.go b/vendor/helm.sh/helm/v3/pkg/ignore/doc.go index 5245d410ee..1f5e918477 100644 --- a/vendor/helm.sh/helm/v3/pkg/ignore/doc.go +++ b/vendor/helm.sh/helm/v3/pkg/ignore/doc.go @@ -26,7 +26,7 @@ The formatting rules are as follows: - Parsing is line-by-line - Empty lines are ignored - - Lines the begin with # (comments) will be ignored + - Lines that begin with # (comments) will be ignored - Leading and trailing spaces are always ignored - Inline comments are NOT supported ('foo* # Any foo' does not contain a comment) - There is no support for multi-line patterns diff --git a/vendor/helm.sh/helm/v3/pkg/kube/client.go b/vendor/helm.sh/helm/v3/pkg/kube/client.go index 9df833a434..d979fd22cd 100644 --- a/vendor/helm.sh/helm/v3/pkg/kube/client.go +++ b/vendor/helm.sh/helm/v3/pkg/kube/client.go @@ -124,7 +124,7 @@ func (c *Client) getKubeClient() (*kubernetes.Clientset, error) { func (c *Client) IsReachable() error { client, err := c.getKubeClient() if err == genericclioptions.ErrEmptyConfig { - // re-replace kubernetes ErrEmptyConfig error with a friendy error + // re-replace kubernetes ErrEmptyConfig error with a friendly error // moar workarounds for Kubernetes API breaking. return errors.New("Kubernetes cluster unreachable") } @@ -635,7 +635,7 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P // Get a versioned object versionedObject := AsVersioned(target) - // Unstructured objects, such as CRDs, may not have an not registered error + // Unstructured objects, such as CRDs, may not have a not registered error // returned from ConvertToVersion. Anything that's unstructured should // use the jsonpatch.CreateMergePatch. Strategic Merge Patch is not supported // on objects like CRDs. diff --git a/vendor/helm.sh/helm/v3/pkg/kube/ready.go b/vendor/helm.sh/helm/v3/pkg/kube/ready.go index b2d26ba761..55c4a39bf1 100644 --- a/vendor/helm.sh/helm/v3/pkg/kube/ready.go +++ b/vendor/helm.sh/helm/v3/pkg/kube/ready.go @@ -426,7 +426,7 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { return false } // This check only makes sense when all partitions are being upgraded otherwise during a - // partioned rolling upgrade, this condition will never evaluate to true, leading to + // partitioned rolling upgrade, this condition will never evaluate to true, leading to // error. if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision { c.log("StatefulSet is not ready: %s/%s. 
currentRevision %s does not yet match updateRevision %s", sts.Namespace, sts.Name, sts.Status.CurrentRevision, sts.Status.UpdateRevision) diff --git a/vendor/helm.sh/helm/v3/pkg/registry/util.go b/vendor/helm.sh/helm/v3/pkg/registry/util.go index 45fbdd0b51..727cdae033 100644 --- a/vendor/helm.sh/helm/v3/pkg/registry/util.go +++ b/vendor/helm.sh/helm/v3/pkg/registry/util.go @@ -65,8 +65,7 @@ func GetTagMatchingVersionOrConstraint(tags []string, versionString string) (str // If string is empty, set wildcard constraint constraint, _ = semver.NewConstraint("*") } else { - // when customer input exact version, check whether have exact match - // one first + // when customer inputs specific version, check whether there's an exact match first for _, v := range tags { if versionString == v { return v, nil diff --git a/vendor/helm.sh/helm/v3/pkg/release/status.go b/vendor/helm.sh/helm/v3/pkg/release/status.go index e0e3ed62a9..edd27a5f14 100644 --- a/vendor/helm.sh/helm/v3/pkg/release/status.go +++ b/vendor/helm.sh/helm/v3/pkg/release/status.go @@ -31,13 +31,13 @@ const ( StatusSuperseded Status = "superseded" // StatusFailed indicates that the release was not successfully deployed. StatusFailed Status = "failed" - // StatusUninstalling indicates that a uninstall operation is underway. + // StatusUninstalling indicates that an uninstall operation is underway. StatusUninstalling Status = "uninstalling" // StatusPendingInstall indicates that an install operation is underway. StatusPendingInstall Status = "pending-install" // StatusPendingUpgrade indicates that an upgrade operation is underway. StatusPendingUpgrade Status = "pending-upgrade" - // StatusPendingRollback indicates that an rollback operation is underway. + // StatusPendingRollback indicates that a rollback operation is underway. StatusPendingRollback Status = "pending-rollback" ) diff --git a/vendor/helm.sh/helm/v3/pkg/repo/index.go b/vendor/helm.sh/helm/v3/pkg/repo/index.go index 40b11c5cf2..e1ce3c62dd 100644 --- a/vendor/helm.sh/helm/v3/pkg/repo/index.go +++ b/vendor/helm.sh/helm/v3/pkg/repo/index.go @@ -200,7 +200,7 @@ func (i IndexFile) Get(name, version string) (*ChartVersion, error) { } } - // when customer input exact version, check whether have exact match one first + // when customer inputs specific version, check whether there's an exact match first if len(version) != 0 { for _, ver := range vs { if version == ver.Version { @@ -371,6 +371,8 @@ func loadIndex(data []byte, source string) (*IndexFile, error) { cvs = append(cvs[:idx], cvs[idx+1:]...) } } + // adjust slice to only contain a set of valid versions + i.Entries[name] = cvs } i.SortEntries() if i.APIVersion == "" { @@ -397,7 +399,7 @@ func jsonOrYamlUnmarshal(b []byte, i interface{}) error { // the error isn't important for index loading // // In particular, charts may introduce validations that don't impact repository indexes -// And repository indexes may be generated by older/non-complient software, which doesn't +// And repository indexes may be generated by older/non-compliant software, which doesn't // conform to all validations. 
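// Illustrative sketch (not part of the vendored Helm sources): given the
// IndexFile.Get signature shown in the hunk above, a caller can resolve a chart
// version from a repository index roughly as follows. The index path and chart
// name are hypothetical.
package main

import (
	"fmt"
	"log"

	"helm.sh/helm/v3/pkg/repo"
)

func main() {
	idx, err := repo.LoadIndexFile("/tmp/charts/index.yaml") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	// Get accepts an exact version or a semver constraint; as the corrected
	// comment above notes, an exact match is checked first.
	cv, err := idx.Get("nginx", ">=1.0.0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cv.Version, cv.URLs)
}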
func ignoreSkippableChartValidationError(err error) error { verr, ok := err.(chart.ValidationError) diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go index 2ef951184b..33bde9b6a4 100644 --- a/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go +++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go @@ -72,8 +72,8 @@ const ( // Following limits based on k8s labels limits - https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set const ( - sqlCustomLabelsTableKeyMaxLenght = 253 + 1 + 63 - sqlCustomLabelsTableValueMaxLenght = 63 + sqlCustomLabelsTableKeyMaxLength = 253 + 1 + 63 + sqlCustomLabelsTableValueMaxLength = 63 ) const ( @@ -119,7 +119,7 @@ func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool { } } - // check if all migrations appliyed + // check if all migrations applied if len(migrationsIDs) != 0 { for id := range migrationsIDs { s.Log("checkAlreadyApplied: find unapplied migration (id: %v)", id) @@ -204,7 +204,7 @@ func (s *SQL) ensureDBSetup() error { CREATE TABLE %s ( %s VARCHAR(64), %s VARCHAR(67), - %s VARCHAR(%d), + %s VARCHAR(%d), %s VARCHAR(%d) ); CREATE INDEX ON %s (%s, %s); @@ -216,9 +216,9 @@ func (s *SQL) ensureDBSetup() error { sqlCustomLabelsTableReleaseKeyColumn, sqlCustomLabelsTableReleaseNamespaceColumn, sqlCustomLabelsTableKeyColumn, - sqlCustomLabelsTableKeyMaxLenght, + sqlCustomLabelsTableKeyMaxLength, sqlCustomLabelsTableValueColumn, - sqlCustomLabelsTableValueMaxLenght, + sqlCustomLabelsTableValueMaxLength, sqlCustomLabelsTableName, sqlCustomLabelsTableReleaseKeyColumn, sqlCustomLabelsTableReleaseNamespaceColumn, diff --git a/vendor/helm.sh/helm/v3/pkg/strvals/parser.go b/vendor/helm.sh/helm/v3/pkg/strvals/parser.go index 2828f20c08..a0e8d66d15 100644 --- a/vendor/helm.sh/helm/v3/pkg/strvals/parser.go +++ b/vendor/helm.sh/helm/v3/pkg/strvals/parser.go @@ -436,7 +436,7 @@ func (t *parser) listItem(list []interface{}, i, nestedNameLevel int) ([]interfa // check for an empty value // read and consume optional spaces until comma or EOF (empty val) or any other char (not empty val) -// comma and spaces are consumed, while any other char is not cosumed +// comma and spaces are consumed, while any other char is not consumed func (t *parser) emptyVal() (bool, error) { for { r, _, e := t.sc.ReadRune() diff --git a/vendor/helm.sh/helm/v3/pkg/time/time.go b/vendor/helm.sh/helm/v3/pkg/time/time.go index 44f3fedfb2..1abe8ae3d8 100644 --- a/vendor/helm.sh/helm/v3/pkg/time/time.go +++ b/vendor/helm.sh/helm/v3/pkg/time/time.go @@ -15,7 +15,7 @@ limitations under the License. */ // Package time contains a wrapper for time.Time in the standard library and -// associated methods. This package mainly exists to workaround an issue in Go +// associated methods. This package mainly exists to work around an issue in Go // where the serializer doesn't omit an empty value for time: // https://github.com/golang/go/issues/11939. As such, this can be removed if a // proposal is ever accepted for Go diff --git a/vendor/k8s.io/kubectl/pkg/util/apply.go b/vendor/k8s.io/kubectl/pkg/util/apply.go new file mode 100644 index 0000000000..77ea593842 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/apply.go @@ -0,0 +1,146 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +var metadataAccessor = meta.NewAccessor() + +// GetOriginalConfiguration retrieves the original configuration of the object +// from the annotation, or nil if no annotation was found. +func GetOriginalConfiguration(obj runtime.Object) ([]byte, error) { + annots, err := metadataAccessor.Annotations(obj) + if err != nil { + return nil, err + } + + if annots == nil { + return nil, nil + } + + original, ok := annots[v1.LastAppliedConfigAnnotation] + if !ok { + return nil, nil + } + + return []byte(original), nil +} + +// SetOriginalConfiguration sets the original configuration of the object +// as the annotation on the object for later use in computing a three way patch. +func setOriginalConfiguration(obj runtime.Object, original []byte) error { + if len(original) < 1 { + return nil + } + + annots, err := metadataAccessor.Annotations(obj) + if err != nil { + return err + } + + if annots == nil { + annots = map[string]string{} + } + + annots[v1.LastAppliedConfigAnnotation] = string(original) + return metadataAccessor.SetAnnotations(obj, annots) +} + +// GetModifiedConfiguration retrieves the modified configuration of the object. +// If annotate is true, it embeds the result as an annotation in the modified +// configuration. If an object was read from the command input, it will use that +// version of the object. Otherwise, it will use the version from the server. +func GetModifiedConfiguration(obj runtime.Object, annotate bool, codec runtime.Encoder) ([]byte, error) { + // First serialize the object without the annotation to prevent recursion, + // then add that serialization to it as the annotation and serialize it again. + var modified []byte + + // Otherwise, use the server side version of the object. + // Get the current annotations from the object. + annots, err := metadataAccessor.Annotations(obj) + if err != nil { + return nil, err + } + + if annots == nil { + annots = map[string]string{} + } + + original := annots[v1.LastAppliedConfigAnnotation] + delete(annots, v1.LastAppliedConfigAnnotation) + if err := metadataAccessor.SetAnnotations(obj, annots); err != nil { + return nil, err + } + + modified, err = runtime.Encode(codec, obj) + if err != nil { + return nil, err + } + + if annotate { + annots[v1.LastAppliedConfigAnnotation] = string(modified) + if err := metadataAccessor.SetAnnotations(obj, annots); err != nil { + return nil, err + } + + modified, err = runtime.Encode(codec, obj) + if err != nil { + return nil, err + } + } + + // Restore the object to its original condition. + annots[v1.LastAppliedConfigAnnotation] = original + if err := metadataAccessor.SetAnnotations(obj, annots); err != nil { + return nil, err + } + + return modified, nil +} + +// updateApplyAnnotation calls CreateApplyAnnotation if the last applied +// configuration annotation is already present. Otherwise, it does nothing. 
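// Illustrative sketch (not part of the vendored kubectl sources): it shows how
// the apply-annotation helpers in this file record and read back the
// last-applied-configuration annotation. The ConfigMap fixture and the
// client-go legacy codec are assumptions made for the example only.
package main

import (
	"fmt"
	"log"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/kubectl/pkg/util"
)

func main() {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Data:       map[string]string{"key": "value"},
	}
	codec := scheme.Codecs.LegacyCodec(corev1.SchemeGroupVersion)

	// createAnnotation=true always writes the annotation; false only refreshes
	// one that is already present (see updateApplyAnnotation below).
	if err := util.CreateOrUpdateAnnotation(true, cm, codec); err != nil {
		log.Fatal(err)
	}

	original, err := util.GetOriginalConfiguration(cm)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("last-applied-configuration: %s\n", original)
}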
+func updateApplyAnnotation(obj runtime.Object, codec runtime.Encoder) error { + if original, err := GetOriginalConfiguration(obj); err != nil || len(original) <= 0 { + return err + } + return CreateApplyAnnotation(obj, codec) +} + +// CreateApplyAnnotation gets the modified configuration of the object, +// without embedding it again, and then sets it on the object as the annotation. +func CreateApplyAnnotation(obj runtime.Object, codec runtime.Encoder) error { + modified, err := GetModifiedConfiguration(obj, false, codec) + if err != nil { + return err + } + return setOriginalConfiguration(obj, modified) +} + +// CreateOrUpdateAnnotation creates the annotation used by +// kubectl apply only when createAnnotation is true +// Otherwise, only update the annotation when it already exists +func CreateOrUpdateAnnotation(createAnnotation bool, obj runtime.Object, codec runtime.Encoder) error { + if createAnnotation { + return CreateApplyAnnotation(obj, codec) + } + return updateApplyAnnotation(obj, codec) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/pod_port.go b/vendor/k8s.io/kubectl/pkg/util/pod_port.go new file mode 100644 index 0000000000..bcd2c72818 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/pod_port.go @@ -0,0 +1,42 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + + "k8s.io/api/core/v1" +) + +// LookupContainerPortNumberByName find containerPort number by its named port name +func LookupContainerPortNumberByName(pod v1.Pod, name string) (int32, error) { + for _, ctr := range pod.Spec.Containers { + for _, ctrportspec := range ctr.Ports { + if ctrportspec.Name == name { + return ctrportspec.ContainerPort, nil + } + } + } + for _, ctr := range pod.Spec.InitContainers { + for _, ctrportspec := range ctr.Ports { + if ctrportspec.Name == name { + return ctrportspec.ContainerPort, nil + } + } + } + return int32(-1), fmt.Errorf("Pod '%s' does not have a named port '%s'", pod.Name, name) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/podutils/podutils.go b/vendor/k8s.io/kubectl/pkg/util/podutils/podutils.go new file mode 100644 index 0000000000..642a6d47a7 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/podutils/podutils.go @@ -0,0 +1,251 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podutils + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// IsPodAvailable returns true if a pod is available; false otherwise. 
+// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *corev1.Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := getPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *corev1.Pod) bool { + return isPodReadyConditionTrue(pod.Status) +} + +func isPodDeleting(pod *corev1.Pod) bool { + return pod.DeletionTimestamp != nil +} + +// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. +func isPodReadyConditionTrue(status corev1.PodStatus) bool { + condition := getPodReadyCondition(status) + return condition != nil && condition.Status == corev1.ConditionTrue +} + +// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func getPodReadyCondition(status corev1.PodStatus) *corev1.PodCondition { + _, condition := getPodCondition(&status, corev1.PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func getPodCondition(status *corev1.PodStatus, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) { + if status == nil { + return -1, nil + } + return getPodConditionFromList(status.Conditions, conditionType) +} + +// GetPodConditionFromList extracts the provided condition from the given list of condition and +// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present. +func getPodConditionFromList(conditions []corev1.PodCondition, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) { + if conditions == nil { + return -1, nil + } + for i := range conditions { + if conditions[i].Type == conditionType { + return i, &conditions[i] + } + } + return -1, nil +} + +// ByLogging allows custom sorting of pods so the best one can be picked for getting its logs. +type ByLogging []*corev1.Pod + +func (s ByLogging) Len() int { return len(s) } +func (s ByLogging) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s ByLogging) Less(i, j int) bool { + // 1. assigned < unassigned + if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { + return len(s[i].Spec.NodeName) > 0 + } + // 2. PodRunning < PodUnknown < PodPending + m := map[corev1.PodPhase]int{corev1.PodRunning: 0, corev1.PodUnknown: 1, corev1.PodPending: 2} + if m[s[i].Status.Phase] != m[s[j].Status.Phase] { + return m[s[i].Status.Phase] < m[s[j].Status.Phase] + } + // 3. ready < not ready + if IsPodReady(s[i]) != IsPodReady(s[j]) { + return IsPodReady(s[i]) + } + // TODO: take availability into account when we push minReadySeconds information from deployment into pods, + // see https://github.com/kubernetes/kubernetes/issues/22065 + // 4. 
Been ready for more time < less time < empty time + if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { + return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i])) + } + // 5. Pods with containers with higher restart counts < lower restart counts + if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { + return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) + } + // 6. older pods < newer pods < empty timestamp pods + if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) { + return afterOrZero(&s[j].CreationTimestamp, &s[i].CreationTimestamp) + } + return false +} + +// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete. +type ActivePods []*corev1.Pod + +func (s ActivePods) Len() int { return len(s) } +func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s ActivePods) Less(i, j int) bool { + // 1. Unassigned < assigned + // If only one of the pods is unassigned, the unassigned one is smaller + if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { + return len(s[i].Spec.NodeName) == 0 + } + // 2. PodPending < PodUnknown < PodRunning + m := map[corev1.PodPhase]int{corev1.PodPending: 0, corev1.PodUnknown: 1, corev1.PodRunning: 2} + if m[s[i].Status.Phase] != m[s[j].Status.Phase] { + return m[s[i].Status.Phase] < m[s[j].Status.Phase] + } + // 3. Not ready < ready + // If only one of the pods is not ready, the not ready one is smaller + if IsPodReady(s[i]) != IsPodReady(s[j]) { + return !IsPodReady(s[i]) + } + // 4. Deleting < Not deleting + if isPodDeleting(s[i]) != isPodDeleting(s[j]) { + return isPodDeleting(s[i]) + } + // 5. Older deletion timestamp < newer deletion timestamp + if isPodDeleting(s[i]) && isPodDeleting(s[j]) && !s[i].ObjectMeta.DeletionTimestamp.Equal(s[j].ObjectMeta.DeletionTimestamp) { + return s[i].ObjectMeta.DeletionTimestamp.Before(s[j].ObjectMeta.DeletionTimestamp) + } + // TODO: take availability into account when we push minReadySeconds information from deployment into pods, + // see https://github.com/kubernetes/kubernetes/issues/22065 + // 6. Been ready for empty time < less time < more time + // If both pods are ready, the latest ready one is smaller + if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { + return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j])) + } + // 7. Pods with containers with higher restart counts < lower restart counts + if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { + return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) + } + // 8. Empty creation time pods < newer pods < older pods + if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) { + return afterOrZero(&s[i].CreationTimestamp, &s[j].CreationTimestamp) + } + return false +} + +// afterOrZero checks if time t1 is after time t2; if one of them +// is zero, the zero time is seen as after non-zero time. 
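// Illustrative sketch (not part of the vendored kubectl sources; the pod
// fixtures are invented). It shows how the ByLogging ordering defined above can
// pick the most suitable pod to stream logs from: after sorting, the first
// element is the best candidate.
package main

import (
	"fmt"
	"sort"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubectl/pkg/util/podutils"
)

func pod(name string, phase corev1.PodPhase, ready corev1.ConditionStatus) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec:       corev1.PodSpec{NodeName: "node-1"},
		Status: corev1.PodStatus{
			Phase:      phase,
			Conditions: []corev1.PodCondition{{Type: corev1.PodReady, Status: ready}},
		},
	}
}

func main() {
	pods := []*corev1.Pod{
		pod("pending-pod", corev1.PodPending, corev1.ConditionFalse),
		pod("ready-pod", corev1.PodRunning, corev1.ConditionTrue),
	}
	sort.Sort(podutils.ByLogging(pods))
	// The running, ready pod sorts first.
	fmt.Println("best pod for logs:", pods[0].Name, "ready:", podutils.IsPodReady(pods[0]))
}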
+func afterOrZero(t1, t2 *metav1.Time) bool { + if t1.Time.IsZero() || t2.Time.IsZero() { + return t1.Time.IsZero() + } + return t1.After(t2.Time) +} + +func podReadyTime(pod *corev1.Pod) *metav1.Time { + for _, c := range pod.Status.Conditions { + // we only care about pod ready conditions + if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue { + return &c.LastTransitionTime + } + } + return &metav1.Time{} +} + +func maxContainerRestarts(pod *corev1.Pod) int { + maxRestarts := 0 + for _, c := range pod.Status.ContainerStatuses { + maxRestarts = max(maxRestarts, int(c.RestartCount)) + } + return maxRestarts +} + +// ContainerType and VisitContainers are taken from +// https://github.com/kubernetes/kubernetes/blob/master/pkg/api/v1/pod/util.go +// kubectl cannot directly import this due to project goals + +// ContainerType signifies container type +type ContainerType int + +const ( + // Containers is for normal containers + Containers ContainerType = 1 << iota + // InitContainers is for init containers + InitContainers + // EphemeralContainers is for ephemeral containers + EphemeralContainers +) + +// AllContainers specifies that all containers be visited. +const AllContainers ContainerType = (InitContainers | Containers | EphemeralContainers) + +// ContainerVisitor is called with each container spec, and returns true +// if visiting should continue. +type ContainerVisitor func(container *corev1.Container, containerType ContainerType) (shouldContinue bool) + +// VisitContainers invokes the visitor function with a pointer to every container +// spec in the given pod spec with type set in mask. If visitor returns false, +// visiting is short-circuited. VisitContainers returns true if visiting completes, +// false if visiting was short-circuited. +func VisitContainers(podSpec *corev1.PodSpec, mask ContainerType, visitor ContainerVisitor) bool { + if mask&InitContainers != 0 { + for i := range podSpec.InitContainers { + if !visitor(&podSpec.InitContainers[i], InitContainers) { + return false + } + } + } + if mask&Containers != 0 { + for i := range podSpec.Containers { + if !visitor(&podSpec.Containers[i], Containers) { + return false + } + } + } + if mask&EphemeralContainers != 0 { + for i := range podSpec.EphemeralContainers { + if !visitor((*corev1.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), EphemeralContainers) { + return false + } + } + } + return true +} diff --git a/vendor/k8s.io/kubectl/pkg/util/service_port.go b/vendor/k8s.io/kubectl/pkg/util/service_port.go new file mode 100644 index 0000000000..bc56ab7d6a --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/service_port.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "fmt" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// LookupContainerPortNumberByServicePort implements +// the handling of resolving container named port, as well as ignoring targetPort when clusterIP=None +// It returns an error when a named port can't find a match (with -1 returned), or when the service does not +// declare such port (with the input port number returned). +func LookupContainerPortNumberByServicePort(svc v1.Service, pod v1.Pod, port int32) (int32, error) { + for _, svcportspec := range svc.Spec.Ports { + if svcportspec.Port != port { + continue + } + if svc.Spec.ClusterIP == v1.ClusterIPNone { + return port, nil + } + if svcportspec.TargetPort.Type == intstr.Int { + if svcportspec.TargetPort.IntValue() == 0 { + // targetPort is omitted, and the IntValue() would be zero + return svcportspec.Port, nil + } + return int32(svcportspec.TargetPort.IntValue()), nil + } + return LookupContainerPortNumberByName(pod, svcportspec.TargetPort.String()) + } + return port, fmt.Errorf("Service %s does not have a service port %d", svc.Name, port) +} + +// LookupServicePortNumberByName find service port number by its named port name +func LookupServicePortNumberByName(svc v1.Service, name string) (int32, error) { + for _, svcportspec := range svc.Spec.Ports { + if svcportspec.Name == name { + return svcportspec.Port, nil + } + } + + return int32(-1), fmt.Errorf("Service '%s' does not have a named port '%s'", svc.Name, name) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/umask.go b/vendor/k8s.io/kubectl/pkg/util/umask.go new file mode 100644 index 0000000000..3f0c4e83e6 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/umask.go @@ -0,0 +1,29 @@ +//go:build !windows +// +build !windows + +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "golang.org/x/sys/unix" +) + +// Umask is a wrapper for `unix.Umask()` on non-Windows platforms +func Umask(mask int) (old int, err error) { + return unix.Umask(mask), nil +} diff --git a/vendor/k8s.io/kubectl/pkg/util/umask_windows.go b/vendor/k8s.io/kubectl/pkg/util/umask_windows.go new file mode 100644 index 0000000000..67f6efb974 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/umask_windows.go @@ -0,0 +1,29 @@ +//go:build windows +// +build windows + +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "errors" +) + +// Umask returns an error on Windows +func Umask(mask int) (int, error) { + return 0, errors.New("platform and architecture is not supported") +} diff --git a/vendor/k8s.io/kubectl/pkg/util/util.go b/vendor/k8s.io/kubectl/pkg/util/util.go new file mode 100644 index 0000000000..ea57d3b39b --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/util.go @@ -0,0 +1,93 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "crypto/md5" + "errors" + "fmt" + "path" + "path/filepath" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// ParseRFC3339 parses an RFC3339 date in either RFC3339Nano or RFC3339 format. +func ParseRFC3339(s string, nowFn func() metav1.Time) (metav1.Time, error) { + if t, timeErr := time.Parse(time.RFC3339Nano, s); timeErr == nil { + return metav1.Time{Time: t}, nil + } + t, err := time.Parse(time.RFC3339, s) + if err != nil { + return metav1.Time{}, err + } + return metav1.Time{Time: t}, nil +} + +// HashObject returns the hash of a Object hash by a Codec +func HashObject(obj runtime.Object, codec runtime.Codec) (string, error) { + data, err := runtime.Encode(codec, obj) + if err != nil { + return "", err + } + return fmt.Sprintf("%x", md5.Sum(data)), nil +} + +// ParseFileSource parses the source given. +// +// Acceptable formats include: +// 1. source-path: the basename will become the key name +// 2. source-name=source-path: the source-name will become the key name and +// source-path is the path to the key file. +// +// Key names cannot include '='. +func ParseFileSource(source string) (keyName, filePath string, err error) { + numSeparators := strings.Count(source, "=") + switch { + case numSeparators == 0: + return path.Base(filepath.ToSlash(source)), source, nil + case numSeparators == 1 && strings.HasPrefix(source, "="): + return "", "", fmt.Errorf("key name for file path %v missing", strings.TrimPrefix(source, "=")) + case numSeparators == 1 && strings.HasSuffix(source, "="): + return "", "", fmt.Errorf("file path for key name %v missing", strings.TrimSuffix(source, "=")) + case numSeparators > 1: + return "", "", errors.New("key names or file paths cannot contain '='") + default: + components := strings.Split(source, "=") + return components[0], components[1], nil + } +} + +// ParseLiteralSource parses the source key=val pair into its component pieces. +// This functionality is distinguished from strings.SplitN(source, "=", 2) since +// it returns an error in the case of empty keys, values, or a missing equals sign. 
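// Illustrative sketch (not part of the vendored kubectl sources; the inputs are
// made up). ParseFileSource above splits the "name=path" form used by flags such
// as --from-file, and ParseLiteralSource (declared next) splits a "key=value"
// literal, keeping any '=' inside the value.
package main

import (
	"fmt"
	"log"

	"k8s.io/kubectl/pkg/util"
)

func main() {
	// The basename-or-explicit-key rules are the ones listed in the
	// ParseFileSource comment above.
	key, path, err := util.ParseFileSource("app.properties=/etc/config/app.properties")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(key, path) // app.properties /etc/config/app.properties

	// Only the first '=' separates key from value.
	k, v, err := util.ParseLiteralSource("JAVA_OPTS=-Xms64m -Xmx128m")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(k, v) // JAVA_OPTS -Xms64m -Xmx128m
}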
+func ParseLiteralSource(source string) (keyName, value string, err error) { + // leading equal is invalid + if strings.Index(source, "=") == 0 { + return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) + } + // split after the first equal (so values can have the = character) + items := strings.SplitN(source, "=", 2) + if len(items) != 2 { + return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) + } + + return items[0], items[1], nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 3d060f9e09..5919459fef 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -55,10 +55,10 @@ github.com/chai2010/gettext-go github.com/chai2010/gettext-go/mo github.com/chai2010/gettext-go/plural github.com/chai2010/gettext-go/po -# github.com/cilium/charts v0.0.0-20240926142256-e20f2b5f5344 +# github.com/cilium/charts v0.0.0-20241015090923-1f4c1b5ac12a ## explicit; go 1.17 github.com/cilium/charts -# github.com/cilium/cilium v1.17.0-pre.1 +# github.com/cilium/cilium v1.17.0-pre.2 ## explicit; go 1.23.0 github.com/cilium/cilium/api/v1/client github.com/cilium/cilium/api/v1/client/bgp @@ -135,11 +135,13 @@ github.com/cilium/cilium/pkg/command/exec github.com/cilium/cilium/pkg/common github.com/cilium/cilium/pkg/comparator github.com/cilium/cilium/pkg/container/bitlpm +github.com/cilium/cilium/pkg/container/set github.com/cilium/cilium/pkg/container/versioned github.com/cilium/cilium/pkg/controller github.com/cilium/cilium/pkg/crypto/certificatemanager github.com/cilium/cilium/pkg/datapath/linux/config/defines github.com/cilium/cilium/pkg/datapath/linux/probes +github.com/cilium/cilium/pkg/datapath/linux/safenetlink github.com/cilium/cilium/pkg/datapath/loader/metrics github.com/cilium/cilium/pkg/datapath/tables github.com/cilium/cilium/pkg/datapath/tunnel @@ -160,13 +162,13 @@ github.com/cilium/cilium/pkg/health/defaults github.com/cilium/cilium/pkg/hive github.com/cilium/cilium/pkg/hive/health github.com/cilium/cilium/pkg/hive/health/types +github.com/cilium/cilium/pkg/hubble github.com/cilium/cilium/pkg/iana github.com/cilium/cilium/pkg/identity github.com/cilium/cilium/pkg/identity/identitymanager github.com/cilium/cilium/pkg/identity/key github.com/cilium/cilium/pkg/identity/model github.com/cilium/cilium/pkg/idpool -github.com/cilium/cilium/pkg/inctimer github.com/cilium/cilium/pkg/ip github.com/cilium/cilium/pkg/ipam/option github.com/cilium/cilium/pkg/ipam/types @@ -245,6 +247,7 @@ github.com/cilium/cilium/pkg/policy/types github.com/cilium/cilium/pkg/promise github.com/cilium/cilium/pkg/rate github.com/cilium/cilium/pkg/rate/metrics +github.com/cilium/cilium/pkg/resiliency github.com/cilium/cilium/pkg/safeio github.com/cilium/cilium/pkg/safetime github.com/cilium/cilium/pkg/service/store @@ -271,12 +274,14 @@ github.com/cilium/ebpf/internal/sysenc github.com/cilium/ebpf/internal/tracefs github.com/cilium/ebpf/internal/unix github.com/cilium/ebpf/link -# github.com/cilium/hive v0.0.0-20240926131619-aa37668760f2 +# github.com/cilium/hive v0.0.0-20241021113747-bb8f3c0bede4 ## explicit; go 1.21.3 github.com/cilium/hive github.com/cilium/hive/cell github.com/cilium/hive/internal github.com/cilium/hive/job +github.com/cilium/hive/script +github.com/cilium/hive/script/internal/diff # github.com/cilium/proxy v0.0.0-20240909042906-ae435a5bef38 ## explicit; go 1.22 github.com/cilium/proxy/go/cilium/api @@ -527,7 +532,7 @@ github.com/cilium/proxy/go/envoy/type/tracing/v3 github.com/cilium/proxy/go/envoy/type/v3 
github.com/cilium/proxy/go/envoy/watchdog/v3 github.com/cilium/proxy/pkg/policy/api/kafka -# github.com/cilium/statedb v0.3.0 +# github.com/cilium/statedb v0.3.2 ## explicit; go 1.23 github.com/cilium/statedb github.com/cilium/statedb/index @@ -658,7 +663,7 @@ github.com/evanphx/json-patch # github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d ## explicit github.com/exponent-io/jsonpath -# github.com/fatih/color v1.17.0 +# github.com/fatih/color v1.18.0 ## explicit; go 1.17 github.com/fatih/color # github.com/felixge/httpsnoop v1.0.4 @@ -975,7 +980,7 @@ github.com/pkg/browser # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/prometheus/client_golang v1.20.4 +# github.com/prometheus/client_golang v1.20.5 ## explicit; go 1.20 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header @@ -986,8 +991,8 @@ github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.59.1 -## explicit; go 1.20 +# github.com/prometheus/common v0.60.0 +## explicit; go 1.21 github.com/prometheus/common/expfmt github.com/prometheus/common/model # github.com/prometheus/procfs v0.15.1 @@ -1056,7 +1061,7 @@ github.com/spf13/viper/internal/features # github.com/subosito/gotenv v1.6.0 ## explicit; go 1.18 github.com/subosito/gotenv -# github.com/vishvananda/netlink v1.3.0 +# github.com/vishvananda/netlink v1.3.1-0.20241022031324-976bd8de7d81 ## explicit; go 1.12 github.com/vishvananda/netlink github.com/vishvananda/netlink/nl @@ -1141,7 +1146,7 @@ go.mongodb.org/mongo-driver/x/bsonx/bsoncore go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.30.0 +# go.opentelemetry.io/otel v1.31.0 ## explicit; go 1.22 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1158,11 +1163,11 @@ go.opentelemetry.io/otel/semconv/v1.17.0/httpconv go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.24.0 -# go.opentelemetry.io/otel/metric v1.30.0 +# go.opentelemetry.io/otel/metric v1.31.0 ## explicit; go 1.22 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded -# go.opentelemetry.io/otel/trace v1.30.0 +# go.opentelemetry.io/otel/trace v1.31.0 ## explicit; go 1.22 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -1202,7 +1207,7 @@ go.uber.org/zap/zapgrpc # go4.org/netipx v0.0.0-20231129151722-fdeea329fbba ## explicit; go 1.18 go4.org/netipx -# golang.org/x/crypto v0.27.0 +# golang.org/x/crypto v0.28.0 ## explicit; go 1.20 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish @@ -1229,7 +1234,7 @@ golang.org/x/exp/slices golang.org/x/exp/slog golang.org/x/exp/slog/internal golang.org/x/exp/slog/internal/buffer -# golang.org/x/net v0.29.0 +# golang.org/x/net v0.30.0 ## explicit; go 1.18 golang.org/x/net/context/ctxhttp golang.org/x/net/html @@ -1243,7 +1248,7 @@ golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.22.0 +# golang.org/x/oauth2 v0.23.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal @@ -1251,16 +1256,16 @@ golang.org/x/oauth2/internal ## 
explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.25.0 +# golang.org/x/sys v0.26.0 ## explicit; go 1.18 golang.org/x/sys/execabs golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.24.0 +# golang.org/x/term v0.25.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.18.0 +# golang.org/x/text v0.19.0 ## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/internal @@ -1272,18 +1277,21 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.6.0 +# golang.org/x/time v0.7.0 ## explicit; go 1.18 golang.org/x/time/rate +# golang.org/x/tools v0.26.0 +## explicit; go 1.22.0 +golang.org/x/tools/txtar # google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 ## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/expr/v1alpha1 -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.67.0 +# google.golang.org/grpc v1.67.1 ## explicit; go 1.21 google.golang.org/grpc google.golang.org/grpc/attributes @@ -1341,8 +1349,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.34.2 -## explicit; go 1.20 +# google.golang.org/protobuf v1.35.1 +## explicit; go 1.21 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -1399,7 +1407,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# helm.sh/helm/v3 v3.16.1 +# helm.sh/helm/v3 v3.16.2 ## explicit; go 1.22.0 helm.sh/helm/v3/internal/fileutil helm.sh/helm/v3/internal/resolver @@ -1441,7 +1449,7 @@ helm.sh/helm/v3/pkg/strvals helm.sh/helm/v3/pkg/time helm.sh/helm/v3/pkg/time/ctime helm.sh/helm/v3/pkg/uploader -# k8s.io/api v0.31.1 +# k8s.io/api v0.31.2 ## explicit; go 1.22.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1501,7 +1509,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.31.1 +# k8s.io/apiextensions-apiserver v0.31.2 ## explicit; go 1.22.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -1517,7 +1525,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextension k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake -# k8s.io/apimachinery v0.31.1 +# k8s.io/apimachinery v0.31.2 ## explicit; go 1.22.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1581,16 +1589,16 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.31.1 +# k8s.io/apiserver v0.31.2 ## explicit; go 1.22.0 
k8s.io/apiserver/pkg/endpoints/deprecation -# k8s.io/cli-runtime v0.31.1 +# k8s.io/cli-runtime v0.31.2 ## explicit; go 1.22.0 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.31.1 +# k8s.io/client-go v0.31.2 ## explicit; go 1.22.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -1813,7 +1821,7 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.31.1 +# k8s.io/component-base v0.31.2 ## explicit; go 1.22.0 k8s.io/component-base/version # k8s.io/klog/v2 v2.130.1 @@ -1837,13 +1845,15 @@ k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/proto/validation k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/kubectl v0.31.0 +# k8s.io/kubectl v0.31.1 ## explicit; go 1.22.0 k8s.io/kubectl/pkg/cmd/util k8s.io/kubectl/pkg/scheme +k8s.io/kubectl/pkg/util k8s.io/kubectl/pkg/util/i18n k8s.io/kubectl/pkg/util/interrupt k8s.io/kubectl/pkg/util/openapi +k8s.io/kubectl/pkg/util/podutils k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation @@ -1876,7 +1886,7 @@ oras.land/oras-go/pkg/target # sigs.k8s.io/controller-runtime v0.19.0 ## explicit; go 1.22.0 sigs.k8s.io/controller-runtime/pkg/client/apiutil -# sigs.k8s.io/gateway-api v1.2.0-rc1.0.20240923191000-5c5fc388829d +# sigs.k8s.io/gateway-api v1.2.0 ## explicit; go 1.22.0 sigs.k8s.io/gateway-api/apis/v1 sigs.k8s.io/gateway-api/apis/v1alpha2 @@ -1967,7 +1977,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/merge2 sigs.k8s.io/kustomize/kyaml/yaml/merge3 sigs.k8s.io/kustomize/kyaml/yaml/schema sigs.k8s.io/kustomize/kyaml/yaml/walk -# sigs.k8s.io/mcs-api v0.1.1-0.20240919125245-7bbb5990134a +# sigs.k8s.io/mcs-api v0.1.1-0.20241002142749-eff1ba8c3ab2 ## explicit; go 1.22.0 sigs.k8s.io/mcs-api/pkg/apis/v1alpha1 sigs.k8s.io/mcs-api/pkg/client/clientset/versioned diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1alpha2/grpcroute_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1alpha2/grpcroute_types.go index 0b0bb4e85a..0750c2cb6b 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1alpha2/grpcroute_types.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1alpha2/grpcroute_types.go @@ -24,6 +24,7 @@ import ( // +genclient // +kubebuilder:object:root=true +// +kubebuilder:skipversion // +kubebuilder:deprecatedversion:warning="The v1alpha2 version of GRPCRoute has been deprecated and will be removed in a future release of the API. Please upgrade to v1." type GRPCRoute v1.GRPCRoute diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1alpha2/referencegrant_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1alpha2/referencegrant_types.go index d673062608..372022f77c 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1alpha2/referencegrant_types.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1alpha2/referencegrant_types.go @@ -26,6 +26,7 @@ import ( // +kubebuilder:object:root=true // +kubebuilder:resource:categories=gateway-api,shortName=refgrant // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:skipversion // +kubebuilder:deprecatedversion:warning="The v1alpha2 version of ReferenceGrant has been deprecated and will be removed in a future release of the API. Please upgrade to v1beta1." // ReferenceGrant identifies kinds of resources in other namespaces that are